"src/targets/vscode:/vscode.git/clone" did not exist on "edda9f93bd5fccbbefbd113941e90df3943bd8bc"
Commit 06d3406e authored by lintangsutawika's avatar lintangsutawika
Browse files

update

parent f23ae748
# Flan Prompt Templates
# NOTE(review): the diff residue duplicated every doc_to_text/doc_to_target key
# (old + new line both present) and left invalid YAML ("""…""" and the \'
# escape, which is not a legal escape in YAML double-quoted scalars).
# Reconstructed below as the intended post-commit file; the apostrophe case is
# handled by double-quoting the Jinja string literal instead of \'.
prompts:
  "template-0":
    doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-1":
    doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-2":
    doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-3":
    doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-4":
    doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-5":
    doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-6":
    doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-7":
    doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-8":
    doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
# Flan Prompt Templates
# NOTE(review): indentation was flattened in this copy, and every
# doc_to_target used \' inside a YAML double-quoted scalar — \' is not a
# defined YAML escape and safe loaders reject it. The Jinja string literal is
# double-quoted instead (escaped as \" at the YAML level), which renders the
# identical label text.
prompts:
  "template-0":
    doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-1":
    doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-2":
    doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-3":
    doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-4":
    doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-5":
    doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-6":
    doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-7":
    doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
  "template-8":
    doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
    doc_to_target: "{{['Yes', \"It's impossible to say\", 'No'][label]}}"
......@@ -8,6 +8,6 @@ metric_list:
ignore_punctuation: true
generation_kwargs:
until:
- "\n\n"
- "</s>"
do_sample: false
temperature: 0.0
......@@ -25,13 +25,13 @@ task:
dataset_path: anli
use_prompt: flan/prompt_templates/flan_anli.yaml:*
validation_split: dev_r3
# - include: flan/yaml_templates/held_in_template_yaml
# task: ai2_arc
# dataset_path: ARC-Easy
# use_prompt: local:*
# validation_split: validation
# - include: flan/yaml_templates/held_in_template_yaml
# task: ai2_arc
# dataset_path: ARC-Challenge
# use_prompt: local:*
# validation_split: validation
# Active ai2_arc entries (indentation restored to list-item nesting).
- include: flan/yaml_templates/held_in_template_yaml
  task: ai2_arc
  dataset_path: ARC-Easy
  use_prompt: local:*
  validation_split: validation
- include: flan/yaml_templates/held_in_template_yaml
  task: ai2_arc
  # Fixed typo: the HF ai2_arc dataset config is spelled "ARC-Challenge";
  # "ARC-Challange" would fail to resolve at load time.
  dataset_path: ARC-Challenge
  use_prompt: local:*
  validation_split: validation
# NOTE(review): the diff residue listed both the removed tasks (bbh, mmlu)
# and their replacements; the commit's "+" lines are the *_flan variants,
# so only those are kept.
group: flan_held_out
task:
  - bbh_flan
  - mmlu_flan
......@@ -6,6 +6,7 @@ task:
use_prompt: promptsource:*
training_split: train
validation_split: validation
output_type: greedy_until
metric_list:
- metric: exact_match
aggregation: mean
......@@ -18,18 +19,6 @@ task:
use_prompt: promptsource:*
training_split: train
validation_split: validation
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
# Natural Language Inference
- dataset_path: super_glue
dataset_name: cb
use_prompt: promptsource:*
training_split: train
validation_split: validation
output_type: greedy_until
metric_list:
- metric: exact_match
......@@ -37,67 +26,86 @@ task:
higher_is_better: true
ignore_case: true
ignore_punctuation: true
- dataset_path: super_glue
dataset_name: rte
use_prompt: promptsource:*
training_split: train
validation_split: validation
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
- task: anli_r1
dataset_path: anli
use_prompt: promptsource:*
training_split: train_r1
validation_split: dev_r1
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
- task: anli_r2
dataset_path: anli
use_prompt: promptsource:*
training_split: train_r2
validation_split: dev_r2
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
- task: anli_r3
dataset_path: anli
use_prompt: promptsource:*
training_split: train_r3
validation_split: dev_r3
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
# Sentence Completion
- dataset_path: super_glue
dataset_name: copa
use_prompt: promptsource:*
training_split: train
validation_split: validation
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
# # Natural Language Inference
# - dataset_path: super_glue
# dataset_name: cb
# use_prompt: promptsource:*
# training_split: train
# validation_split: validation
# output_type: greedy_until
# metric_list:
# - metric: exact_match
# aggregation: mean
# higher_is_better: true
# ignore_case: true
# ignore_punctuation: true
# - dataset_path: super_glue
# dataset_name: rte
# use_prompt: promptsource:*
# training_split: train
# validation_split: validation
# output_type: greedy_until
# metric_list:
# - metric: exact_match
# aggregation: mean
# higher_is_better: true
# ignore_case: true
# ignore_punctuation: true
# - task: anli_r1
# dataset_path: anli
# use_prompt: promptsource:*
# training_split: train_r1
# validation_split: dev_r1
# output_type: greedy_until
# metric_list:
# - metric: exact_match
# aggregation: mean
# higher_is_better: true
# ignore_case: true
# ignore_punctuation: true
# - task: anli_r2
# dataset_path: anli
# use_prompt: promptsource:*
# training_split: train_r2
# validation_split: dev_r2
# output_type: greedy_until
# metric_list:
# - metric: exact_match
# aggregation: mean
# higher_is_better: true
# ignore_case: true
# ignore_punctuation: true
# - task: anli_r3
# dataset_path: anli
# use_prompt: promptsource:*
# training_split: train_r3
# validation_split: dev_r3
# output_type: greedy_until
# metric_list:
# - metric: exact_match
# aggregation: mean
# higher_is_better: true
# ignore_case: true
# ignore_punctuation: true
# # Sentence Completion
# - dataset_path: super_glue
# dataset_name: copa
# use_prompt: promptsource:*
# training_split: train
# validation_split: validation
# output_type: greedy_until
# metric_list:
# - metric: exact_match
# aggregation: mean
# higher_is_better: true
# ignore_case: true
# ignore_punctuation: true
# Natural Language Inference
- dataset_path: hellaswag
use_prompt: promptsource:*
training_split: train
validation_split: validation
output_type: greedy_until
metric_list:
- metric: exact_match
aggregation: mean
......@@ -110,6 +118,7 @@ task:
use_prompt: promptsource:*
training_split: train
validation_split: validation
output_type: greedy_until
metric_list:
- metric: exact_match
aggregation: mean
......
......@@ -27,3 +27,6 @@ def main() -> None:
if __name__ == "__main__":
main()
# https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/boolean_expressions.txt
......@@ -2,16 +2,14 @@ group: bbh
# NOTE(review): doc_to_text appeared twice (diff old + new line); per the
# hunk (@@ -2,16 +2,14), the commit replaces the bare "{{input}}" prompt with
# the "Q: … A:" framing, so only the new value is kept. Nesting restored.
dataset_path: lukaemon/bbh
output_type: greedy_until
test_split: test
doc_to_text: "Q: {{input}}\nA:"
doc_to_target: "{{target}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    # Punctuation is significant for BBH answers (e.g. "(A)") — keep it.
    ignore_punctuation: false
generation_kwargs:
  until:
    - "\n\n"
    - "</s>"
  do_sample: false
  temperature: 0.0
# NOTE(review): the stanzas below are twelve *separate* per-task config files
# concatenated by the diff view; each "# Generated by" comment starts a new
# file. They are not one document (that would mean duplicate top-level keys).
# Generated by _generate_configs.py
dataset_name: boolean_expressions
include: _template_yaml
task: bbh_boolean_expressions
# Generated by _generate_configs.py
dataset_name: causal_judgement
include: _template_yaml
task: bbh_causal_judgement
# Generated by _generate_configs.py
dataset_name: date_understanding
include: _template_yaml
task: bbh_date_understanding
# Generated by _generate_configs.py
dataset_name: disambiguation_qa
include: _template_yaml
task: bbh_disambiguation_qa
# Generated by _generate_configs.py
dataset_name: dyck_languages
include: _template_yaml
task: bbh_dyck_languages
# Generated by _generate_configs.py
dataset_name: formal_fallacies
include: _template_yaml
task: bbh_formal_fallacies
# Generated by _generate_configs.py
dataset_name: geometric_shapes
include: _template_yaml
task: bbh_geometric_shapes
# Generated by _generate_configs.py
dataset_name: hyperbaton
include: _template_yaml
task: bbh_hyperbaton
# Generated by _generate_configs.py
dataset_name: logical_deduction_five_objects
include: _template_yaml
task: bbh_logical_deduction_five_objects
# Generated by _generate_configs.py
dataset_name: logical_deduction_seven_objects
include: _template_yaml
task: bbh_logical_deduction_seven_objects
# Generated by _generate_configs.py
dataset_name: logical_deduction_three_objects
include: _template_yaml
task: bbh_logical_deduction_three_objects
# Generated by _generate_configs.py
dataset_name: movie_recommendation
include: _template_yaml
task: bbh_movie_recommendation
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment