From 9c74820471735651e07704c4ca89be33f5603507 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 7 Aug 2023 13:58:11 +0000 Subject: [PATCH 001/264] making t5 version of superglue prompt --- lm_eval/evaluator.py | 9 +++++++-- lm_eval/models/huggingface.py | 3 +++ lm_eval/tasks/super_glue/boolq/t5-prompt.yaml | 18 ++++++++++++++++++ lm_eval/tasks/super_glue/copa/t5-prompt.yaml | 4 ++-- .../tasks/super_glue/multirc/t5-prompt.yaml | 17 +++++++++++++++++ lm_eval/tasks/super_glue/rte/t5-prompt.yaml | 17 +++++++++++++++++ lm_eval/tasks/super_glue/wic/t5-prompt.yaml | 17 +++++++++++++++++ lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 3 +-- 8 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 lm_eval/tasks/super_glue/boolq/t5-prompt.yaml create mode 100644 lm_eval/tasks/super_glue/multirc/t5-prompt.yaml create mode 100644 lm_eval/tasks/super_glue/rte/t5-prompt.yaml create mode 100644 lm_eval/tasks/super_glue/wic/t5-prompt.yaml diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index bb7147bd..81a838dd 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -114,7 +114,12 @@ def simple_evaluate( task_dict = lm_eval.tasks.get_task_dict(tasks) for task_name in task_dict.keys(): - config = task_dict[task_name]._config + + task_obj = task_dict[task_name] + if type(task_obj) == tuple: + group, task_obj = task_obj + + config = task_obj._config if num_fewshot is not None: if config["num_fewshot"] > 0: default_num_fewshot = config["num_fewshot"] @@ -122,7 +127,7 @@ def simple_evaluate( f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}" ) - task_dict[task_name]._config["num_fewshot"] = num_fewshot + task_obj._config["num_fewshot"] = num_fewshot if check_integrity: run_task_tests(task_list=tasks) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 8a448663..dc2270ae 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -1,3 +1,5 @@ +import os + import torch import transformers from transformers.models.auto.modeling_auto import ( @@ -74,6 +76,7 @@ class HFLM(LM): low_cpu_mem_usage: Optional[bool] = True, trust_remote_code: Optional[bool] = False, use_fast_tokenizer: Optional[bool] = True, + cache_dir: Optional[Union[str,os.PathLike]] = None, # arguments used for splitting a model across GPUs naively. # only used if `parallelize=True`. 
parallelize: Optional[bool] = False, diff --git a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml new file mode 100644 index 00000000..6fecb167 --- /dev/null +++ b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -0,0 +1,18 @@ +group: + - super-glue-t5-prompt +task: super_glue-boolq-t5-prompt +dataset_path: super_glue +dataset_name: boolq +training_split: train +validation_split: validation +output_type: greedy_until +doc_to_text: "boolq question: {{question}} passage {{passage}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + diff --git a/lm_eval/tasks/super_glue/copa/t5-prompt.yaml b/lm_eval/tasks/super_glue/copa/t5-prompt.yaml index 5eeeabff..e3f6f04a 100644 --- a/lm_eval/tasks/super_glue/copa/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/copa/t5-prompt.yaml @@ -6,9 +6,9 @@ dataset_name: copa training_split: train validation_split: validation output_type: greedy_until -doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} question: {{question}}" +doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} premise: {{premise}} question: {{question}}" doc_to_target: label -doc_to_choice: ['False', 'True'] +doc_to_choice: ['choice1', 'choice2'] metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml new file mode 100644 index 00000000..e7a286fc --- /dev/null +++ b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-t5-prompt +task: super_glue-multirc-t5-prompt +dataset_path: super_glue +dataset_name: multirc +training_split: train +validation_split: validation +output_type: greedy_until +doc_to_text: "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true diff --git a/lm_eval/tasks/super_glue/rte/t5-prompt.yaml b/lm_eval/tasks/super_glue/rte/t5-prompt.yaml new file mode 100644 index 00000000..3da43b47 --- /dev/null +++ b/lm_eval/tasks/super_glue/rte/t5-prompt.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-t5-prompt +task: super_glue-rte-t5-prompt +dataset_path: super_glue +dataset_name: rte +training_split: train +validation_split: validation +output_type: greedy_until +doc_to_text: "rte premise: {{premise}} hypothesis: {{hypothesis}}" +doc_to_target: label +doc_to_choice: ['entailment', 'not_entailment'] +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true diff --git a/lm_eval/tasks/super_glue/wic/t5-prompt.yaml b/lm_eval/tasks/super_glue/wic/t5-prompt.yaml new file mode 100644 index 00000000..8e41d05e --- /dev/null +++ b/lm_eval/tasks/super_glue/wic/t5-prompt.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-t5-prompt +task: super_glue-wic-t5-prompt +dataset_path: super_glue +dataset_name: wic +training_split: train +validation_split: validation +output_type: greedy_until +doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml 
b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml index 2e57d511..a42ff135 100644 --- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -7,8 +7,7 @@ training_split: train validation_split: validation output_type: greedy_until doc_to_text: !function "preprocess_wsc.t5_prompt_doc_to_text" -doc_to_target: label -doc_to_choice: ['False', 'True'] +doc_to_target: "{{[span1_text, span2_text][label]}}" metric_list: - metric: exact_match aggregation: mean -- GitLab From bee52bf2578a5cad020642917f688cd1cf7f05d3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 7 Aug 2023 16:08:07 +0000 Subject: [PATCH 002/264] add : --- lm_eval/tasks/super_glue/cb/t5-prompt.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/super_glue/cb/t5-prompt.yaml b/lm_eval/tasks/super_glue/cb/t5-prompt.yaml index a79af99f..ee967912 100644 --- a/lm_eval/tasks/super_glue/cb/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/cb/t5-prompt.yaml @@ -6,7 +6,7 @@ dataset_name: cb training_split: train validation_split: validation output_type: greedy_until -doc_to_text: "cb hypothesis: {{hypothesis}} premise {{premise}}" +doc_to_text: "cb hypothesis: {{hypothesis}} premise: {{premise}}" doc_to_target: label doc_to_choice: ['entailment', 'contradiction', 'neutral'] metric_list: -- GitLab From d46d792de661fdbff0f221ba6322cd8e698694a0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 7 Aug 2023 16:08:20 +0000 Subject: [PATCH 003/264] add : --- lm_eval/tasks/super_glue/boolq/t5-prompt.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml index 6fecb167..3c5936dc 100644 --- a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -6,7 +6,7 @@ dataset_name: boolq training_split: train validation_split: validation output_type: greedy_until -doc_to_text: "boolq question: {{question}} passage {{passage}}" +doc_to_text: "boolq question: {{question}} passage: {{passage}}" doc_to_target: label doc_to_choice: ['False', 'True'] metric_list: -- GitLab From 0e26c0bd9c152cf51aa9a89812f584e781d23244 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 8 Aug 2023 06:54:41 +0000 Subject: [PATCH 004/264] update to match t5 prompt template --- lm_eval/tasks/super_glue/boolq/t5-prompt.yaml | 2 +- lm_eval/tasks/super_glue/record/t5-prompt.yaml | 2 +- lm_eval/tasks/super_glue/rte/t5-prompt.yaml | 2 +- lm_eval/tasks/super_glue/wsc/preprocess_wsc.py | 9 +++------ lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 2 +- 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml index 3c5936dc..8b216605 100644 --- a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -6,7 +6,7 @@ dataset_name: boolq training_split: train validation_split: validation output_type: greedy_until -doc_to_text: "boolq question: {{question}} passage: {{passage}}" +doc_to_text: "boolq passage: {{passage}} question: {{question}}" doc_to_target: label doc_to_choice: ['False', 'True'] metric_list: diff --git a/lm_eval/tasks/super_glue/record/t5-prompt.yaml b/lm_eval/tasks/super_glue/record/t5-prompt.yaml index 9025d865..e674fed0 100644 --- a/lm_eval/tasks/super_glue/record/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/record/t5-prompt.yaml @@ -6,7 +6,7 @@ dataset_name: record training_split: train 
validation_split: validation output_type: greedy_until -doc_to_text: "record query: {{query}} entities: {{entities}} passage: {{passage}}" +doc_to_text: "record query: {{query}} entities: {{entities|join(\", \")}} passage: {{passage}}" doc_to_target: "{{answers}}" metric_list: - metric: exact_match diff --git a/lm_eval/tasks/super_glue/rte/t5-prompt.yaml b/lm_eval/tasks/super_glue/rte/t5-prompt.yaml index 3da43b47..2725b70e 100644 --- a/lm_eval/tasks/super_glue/rte/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/rte/t5-prompt.yaml @@ -6,7 +6,7 @@ dataset_name: rte training_split: train validation_split: validation output_type: greedy_until -doc_to_text: "rte premise: {{premise}} hypothesis: {{hypothesis}}" +doc_to_text: "rte hypothesis: {{hypothesis}} premise: {{premise}}" doc_to_target: label doc_to_choice: ['entailment', 'not_entailment'] metric_list: diff --git a/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py b/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py index 0f7c2510..0a830e06 100644 --- a/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py +++ b/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py @@ -7,15 +7,12 @@ def t5_prompt_doc_to_text(x): pattern_tmpl = r"^((?:\S+\s){N})(W)" pattern = re.sub("N", str(span_idx), pattern_tmpl) pattern = re.sub("W", span_str, pattern) - return re.sub(pattern, r"\1{0} \2 {0}".format(mark), text) + return re.sub(pattern, r"\1{0}\2{0}".format(mark), text) text = x["text"] - text = _mark_span(text, x["span1_text"], x["span1_index"], "*") - # Compensate for 2 added "words" added in previous step. - span2_index = x["span2_index"] + 2 * (x["span1_index"] < x["span2_index"]) - text = _mark_span(text, x["span2_text"], span2_index, "#") + text = _mark_span(text, x["span2_text"], x["span2_index"], "*") - return text + return "wsc: "+text def default_doc_to_text(x): diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml index a42ff135..4dfb18ae 100644 --- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -7,7 +7,7 @@ training_split: train validation_split: validation output_type: greedy_until doc_to_text: !function "preprocess_wsc.t5_prompt_doc_to_text" -doc_to_target: "{{[span1_text, span2_text][label]}}" +doc_to_target: span1_text metric_list: - metric: exact_match aggregation: mean -- GitLab From e1fdf2a88cec2ec37539e52d4d6abc46a7938554 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 8 Aug 2023 13:15:36 +0000 Subject: [PATCH 005/264] update --- lm_eval/tasks/super_glue/wic/t5-prompt.yaml | 2 +- lm_eval/tasks/super_glue/wsc/default.yaml | 2 +- lm_eval/tasks/super_glue/wsc/preprocess_wsc.py | 14 -------------- lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 2 +- 4 files changed, 3 insertions(+), 17 deletions(-) diff --git a/lm_eval/tasks/super_glue/wic/t5-prompt.yaml b/lm_eval/tasks/super_glue/wic/t5-prompt.yaml index 8e41d05e..a48cfaee 100644 --- a/lm_eval/tasks/super_glue/wic/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wic/t5-prompt.yaml @@ -6,7 +6,7 @@ dataset_name: wic training_split: train validation_split: validation output_type: greedy_until -doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}}" +doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}" doc_to_target: label doc_to_choice: ['False', 'True'] metric_list: diff --git a/lm_eval/tasks/super_glue/wsc/default.yaml b/lm_eval/tasks/super_glue/wsc/default.yaml index 8c12370d..15014705 100644 --- a/lm_eval/tasks/super_glue/wsc/default.yaml +++ 
b/lm_eval/tasks/super_glue/wsc/default.yaml
@@ -2,7 +2,7 @@ group:
   - super-glue-lm-eval-v1
 task: wsc
 dataset_path: super_glue
-dataset_name: wsc
+dataset_name: wsc.fixed
 output_type: multiple_choice
 training_split: train
 validation_split: validation
diff --git a/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py b/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py
index 0a830e06..c62c2567 100644
--- a/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py
+++ b/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py
@@ -1,20 +1,6 @@
-import re
 from lm_eval.utils import general_detokenize


-def t5_prompt_doc_to_text(x):
-    def _mark_span(text, span_str, span_idx, mark):
-        pattern_tmpl = r"^((?:\S+\s){N})(W)"
-        pattern = re.sub("N", str(span_idx), pattern_tmpl)
-        pattern = re.sub("W", span_str, pattern)
-        return re.sub(pattern, r"\1{0}\2{0}".format(mark), text)
-
-    text = x["text"]
-    text = _mark_span(text, x["span2_text"], x["span2_index"], "*")
-
-    return "wsc: " + text
-
-
 def default_doc_to_text(x):
     raw_passage = x["text"]
     # NOTE: HuggingFace span indices are word-based not character-based.
diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml
index 4dfb18ae..bf9920d0 100644
--- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml
+++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml
@@ -6,7 +6,7 @@ dataset_name: wsc
 training_split: train
 validation_split: validation
 output_type: greedy_until
-doc_to_text: !function "preprocess_wsc.t5_prompt_doc_to_text"
+doc_to_text: !function "t5_utils.t5_prompt_doc_to_text"
 doc_to_target: span1_text
 metric_list:
   - metric: exact_match
     aggregation: mean
--
GitLab


From 25a4e2c61ff6b29a5466daf3d987e038168de2db Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Tue, 8 Aug 2023 22:36:02 +0000
Subject: [PATCH 006/264] add yaml-genning script + WIP base yaml

---
 lm_eval/tasks/mmlu/gen_all_splits.py                | 105 ++++++++++++++++++
 .../mmlu/hendrycks_test_original_default.yaml       |  21 ++++
 2 files changed, 126 insertions(+)
 create mode 100644 lm_eval/tasks/mmlu/gen_all_splits.py
 create mode 100644 lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml

diff --git a/lm_eval/tasks/mmlu/gen_all_splits.py b/lm_eval/tasks/mmlu/gen_all_splits.py
new file mode 100644
index 00000000..5d22d3a7
--- /dev/null
+++ b/lm_eval/tasks/mmlu/gen_all_splits.py
@@ -0,0 +1,105 @@
+"""
+Take in a YAML, and output all other splits with this YAML
+"""
+import os
+import yaml
+import argparse
+
+from tqdm import tqdm
+
+from lm_eval import utils
+from lm_eval.logger import eval_logger
+
+SUBJECTS = [
+    # "abstract_algebra",
+    "anatomy",
+    "astronomy",
+    "business_ethics",
+    "clinical_knowledge",
+    "college_biology",
+    "college_chemistry",
+    "college_computer_science",
+    "college_mathematics",
+    "college_medicine",
+    "college_physics",
+    "computer_security",
+    "conceptual_physics",
+    "econometrics",
+    "electrical_engineering",
+    "elementary_mathematics",
+    "formal_logic",
+    "global_facts",
+    "high_school_biology",
+    "high_school_chemistry",
+    "high_school_computer_science",
+    "high_school_european_history",
+    "high_school_geography",
+    "high_school_government_and_politics",
+    "high_school_macroeconomics",
+    "high_school_mathematics",
+    "high_school_microeconomics",
+    "high_school_physics",
+    "high_school_psychology",
+    "high_school_statistics",
+    "high_school_us_history",
+    "high_school_world_history",
+    "human_aging",
+    "human_sexuality",
+    "international_law",
+    "jurisprudence",
+    "logical_fallacies",
+    "machine_learning",
+    "management",
+    "marketing",
+    "medical_genetics",
+    "miscellaneous",
+    "moral_disputes",
+    "moral_scenarios",
+    "nutrition",
+    "philosophy",
+    "prehistory",
+    "professional_accounting",
+    "professional_law",
+    "professional_medicine",
+    "professional_psychology",
+    "public_relations",
+    "security_studies",
+    "sociology",
+    "us_foreign_policy",
+    "virology",
+    "world_religions",
+]
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    # parser.add_argument("--benchmark_name", required=True)
+    parser.add_argument("--base_yaml_path", required=True)
+    parser.add_argument(
+        "--task_save_path", default="lm_eval/tasks/mmlu/hendrycks_test_original"
+    )
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+
+    args = parse_args()
+
+    # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+    with open(args.base_yaml_path) as f:
+        base_yaml = yaml.full_load(f)
+        print(base_yaml)
+
+    for subject in tqdm(SUBJECTS):
+
+        yaml_dict = {
+            "include": base_yaml_name,
+            # str.strip() removes a *set of characters*, not a substring, and
+            # `+ "subject"` appended the literal string "subject" rather than
+            # the loop variable; build the per-subject task name explicitly.
+            "task": base_yaml["task"].replace("abstract_algebra", "") + subject,
+            "dataset_name": subject,
+        }
+
+        file_save_path = args.task_save_path + f"_{subject}.yaml"
+        eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
+        with open(file_save_path, "w") as yaml_file:
+            yaml.dump(yaml_dict, yaml_file)
diff --git a/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml b/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml
new file mode 100644
index 00000000..cde530c2
--- /dev/null
+++ b/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml
@@ -0,0 +1,21 @@
+group:
+  - mmlu
+  - mmlu_original
+  - multiple_choice
+task: mmlu_original_
+dataset_path: cais/mmlu
+dataset_name: abstract_algebra
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: "Question: {{question}}\nAnswer:"
+doc_to_target: "{{choices.label.index(answerKey)}}"
+doc_to_choice: "{{choices.text}}"
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
--
GitLab


From 620d8a362291a24244d38962c5c9805e3439af8c Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Tue, 8 Aug 2023 23:05:17 +0000
Subject: [PATCH 007/264] add draft abstract algebra task, fix genning script

---
 .pre-commit-config.yaml                              |  2 +-
 lm_eval/tasks/mmlu/gen_all_splits.py                 |  1 +
 .../tasks/mmlu/hendrycks_test_original_default.yaml  | 10 +++++-----
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8455eb0d..adb25b0a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -40,6 +40,6 @@ repos:
       - id: codespell
         exclude: >
           (?x)^(
-              .*\.json|ignore.txt
+              .*\.json|ignore.txt|.*\.yaml
           )$
         args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt]
diff --git a/lm_eval/tasks/mmlu/gen_all_splits.py b/lm_eval/tasks/mmlu/gen_all_splits.py
index 5d22d3a7..f6f7d96e 100644
--- a/lm_eval/tasks/mmlu/gen_all_splits.py
+++ b/lm_eval/tasks/mmlu/gen_all_splits.py
@@ -97,6 +97,7 @@ if __name__ == "__main__":
             "include": base_yaml_name,
             "task": base_yaml["task"].replace("abstract_algebra", "") + subject,
             "dataset_name": subject,
+            "description": f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n",
         }

         file_save_path = args.task_save_path + f"_{subject}.yaml"
diff --git a/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml b/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml
index cde530c2..4eb7ea4e 100644
---
a/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml +++ b/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml @@ -2,16 +2,16 @@ group: - mmlu - mmlu_original - multiple_choice -task: mmlu_original_ +task: mmlu_original_abstract_algebra dataset_path: cais/mmlu dataset_name: abstract_algebra output_type: multiple_choice -training_split: train validation_split: validation test_split: test -doc_to_text: "Question: {{question}}\nAnswer:" -doc_to_target: "{{choices.label.index(answerKey)}}" -doc_to_choice: "{{choices.text}}" +description: "The following are multiple choice questions (with answers) about abstract algebra.\n\n" +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" +doc_to_choice: "{{choices}}" +doc_to_target: "{{answer}}" metric_list: - metric: acc aggregation: mean -- GitLab From 9b00813fc0cf289feaea8f8bbc5e49c71bd947e1 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 8 Aug 2023 23:42:36 +0000 Subject: [PATCH 008/264] fix choices --- lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml b/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml index 4eb7ea4e..248e7561 100644 --- a/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml +++ b/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml @@ -10,7 +10,7 @@ validation_split: validation test_split: test description: "The following are multiple choice questions (with answers) about abstract algebra.\n\n" doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" -doc_to_choice: "{{choices}}" +doc_to_choice: ["A", "B", "C", "D"] doc_to_target: "{{answer}}" metric_list: - metric: acc -- GitLab From 8806eff5796ece5453b6525d874b247f4de660b4 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 11 Aug 2023 16:01:50 +0000 Subject: [PATCH 009/264] support bleu score as a metric --- lm_eval/api/metrics.py | 46 +++++++++++++++++++++++++---------------- lm_eval/api/task.py | 27 ++++++++++++++---------- lm_eval/evaluator.py | 47 ++++++++++++++++++++++++------------------ 3 files changed, 71 insertions(+), 49 deletions(-) diff --git a/lm_eval/api/metrics.py b/lm_eval/api/metrics.py index c618bbcf..f8c66305 100644 --- a/lm_eval/api/metrics.py +++ b/lm_eval/api/metrics.py @@ -56,6 +56,24 @@ def matthews_corrcoef(items): return sklearn.metrics.matthews_corrcoef(golds, preds) +@register_aggregation("bleu") +def bleu(items): + """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric + for evaluating a generated sentence to a reference sentence. It counts matching + n-grams in the candidate translation to n-grams in the reference text, where + 1-gram or unigram would be each token and a bigram comparison would be each + word pair. 
The comparison is made regardless of word order + Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/ + Paper: https://www.aclweb.org/anthology/P02-1040/ + + Higher is better + """ + refs = list(zip(*items))[0] + preds = list(zip(*items))[1] + refs, preds = _sacreformat(refs, preds) + return sacrebleu.corpus_bleu(preds, refs).score + + @register_metric( metric="acc", higher_is_better=True, @@ -160,6 +178,16 @@ def f1_fn(items): # This is a passthrough function return items +@register_metric( + metric="bleu", + higher_is_better=True, + output_type="greedy_until", + aggregation="bleu", +) +def bleu_fn(items): # This is a passthrough function + return items + + @register_metric( metric="acc_all", higher_is_better=True, @@ -217,24 +245,6 @@ def weighted_mean(items): return sum(a) / sum(b) -@register_metric(metric="bleu", higher_is_better=True, aggregation="mean") -def bleu(items): - """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric - for evaluating a generated sentence to a reference sentence. It counts matching - n-grams in the candidate translation to n-grams in the reference text, where - 1-gram or unigram would be each token and a bigram comparison would be each - word pair. The comparison is made regardless of word order - Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/ - Paper: https://www.aclweb.org/anthology/P02-1040/ - - Higher is better - """ - refs = list(zip(*items))[0] - preds = list(zip(*items))[1] - refs, preds = _sacreformat(refs, preds) - return sacrebleu.corpus_bleu(preds, refs).score - - @register_metric(metric="chrf", higher_is_better=True, aggregation="mean") def chrf(items): """chrF++ is a tool for automatic evaluation of machine translation output diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index df87df0c..95a11c9e 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -999,11 +999,14 @@ class ConfigurableTask(Task): # TODO: this may break for multipLe_target, non zero-or-1 metrics scores = [] for gold_option in gold: - res = self._metric_fn_list[key]( - references=[gold_option], - predictions=[result], - **self._metric_fn_kwargs[key], - ) + try: + res = self._metric_fn_list[key]( + references=[gold_option], + predictions=[result], + **self._metric_fn_kwargs[key], + ) + except TypeError: # TODO: this is hacky and I don't want to do it + result = self._metric_fn_list[key]([gold_option, result]) if isinstance(res, dict): # TODO: this handles the case where HF evaluate returns a dict. 
res = res[key] @@ -1013,12 +1016,14 @@ class ConfigurableTask(Task): else: result = 0.0 else: - result = self._metric_fn_list[key]( - references=[gold], - predictions=[result], - **self._metric_fn_kwargs[key], - ) - + try: + result = self._metric_fn_list[key]( + references=[gold], + predictions=[result], + **self._metric_fn_kwargs[key], + ) + except TypeError: + result = self._metric_fn_list[key]([gold, result]) if isinstance(result, dict): result_dict.update(result) else: diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 01fb7abf..68a6472a 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -362,28 +362,35 @@ def evaluate( if type(items[0]) == tuple: numitem = len(items[0]) - # distributed gather requires all ranks to have same dimensions - # so we pad out with float32 min value - pad_value = torch.finfo(torch.float32).min - metrics_tensor = torch.tensor(items, device=lm.device) - - original_dtype = metrics_tensor.dtype # store original dtype - torch_device_tensor = lm.accelerator.pad_across_processes( - metrics_tensor.to(torch.float32), pad_index=pad_value - ) - gathered_item = lm.accelerator.gather(torch_device_tensor) + if isinstance(items[0], (str, list)): + # handle the string case + gathered_items = [None] * lm.accelerator.num_processes + torch.distributed.all_gather_object(gathered_items, items) - if numitem > 0: - gathered_filtered = gathered_item[gathered_item[:, 0] != pad_value] + gathered_item = list(itertools.chain.from_iterable(gathered_items)) else: - gathered_filtered = gathered_item[gathered_item != pad_value] + # distributed gather requires all ranks to have same dimensions + # so we pad out with float32 min value + pad_value = torch.finfo(torch.float32).min + metrics_tensor = torch.tensor(items, device=lm.device) + + original_dtype = metrics_tensor.dtype # store original dtype + torch_device_tensor = lm.accelerator.pad_across_processes( + metrics_tensor.to(torch.float32), pad_index=pad_value + ) + gathered_item = lm.accelerator.gather(torch_device_tensor) - gathered_item = ( - gathered_filtered.to(original_dtype).cpu().detach().numpy().tolist() - ) - # reconvert if we were passed a tuple of values - if numitem > 0: - gathered_item = [tuple(g) for g in gathered_item] + if numitem > 0: + gathered_filtered = gathered_item[gathered_item[:, 0] != pad_value] + else: + gathered_filtered = gathered_item[gathered_item != pad_value] + + gathered_item = ( + gathered_filtered.to(original_dtype).cpu().detach().numpy().tolist() + ) + # reconvert if we were passed a tuple of values + if numitem > 0: + gathered_item = [tuple(g) for g in gathered_item] if lm.rank == 0: vals_torch[(task_name, key, metric)] = gathered_item @@ -415,7 +422,7 @@ def evaluate( # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap # so we run them less iterations. 
still looking for a cleaner way to do this - if bootstrap_iters > 0: + if False: # bootstrap_iters > 0: stderr = lm_eval.api.metrics.stderr_for_metric( metric=task.aggregation()[metric], bootstrap_iters=min(bootstrap_iters, 1000) -- GitLab From 116c540ad3162d6a701fa567ae16f153e89f7587 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Sat, 12 Aug 2023 02:59:38 +0000 Subject: [PATCH 010/264] make chrf and ter aggregations --- lm_eval/api/metrics.py | 82 ++++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 31 deletions(-) diff --git a/lm_eval/api/metrics.py b/lm_eval/api/metrics.py index f8c66305..857588b9 100644 --- a/lm_eval/api/metrics.py +++ b/lm_eval/api/metrics.py @@ -74,6 +74,37 @@ def bleu(items): return sacrebleu.corpus_bleu(preds, refs).score +@register_aggregation("chrf") +def chrf(items): + """chrF++ is a tool for automatic evaluation of machine translation output + based on character n-gram precision and recall enhanced with word n-grams. + Source: https://github.com/m-popovic/chrF + Paper: https://www.aclweb.org/anthology/W15-3049.pdf + + Higher is better # TODO I think + """ + refs = list(zip(*items))[0] + preds = list(zip(*items))[1] + refs, preds = _sacreformat(refs, preds) + return sacrebleu.corpus_chrf(preds, refs).score + + +@register_aggregation("ter") +def ter(items): + """Translation Error Rate is an error metric for machine translation that + measures the number of edits required to change a system output into one + of the references + Source: http://www.cs.umd.edu/~snover/tercom/ + Paper: http://mt-archive.info/AMTA-2006-Snover.pdf + + Lower is better + """ + refs = list(zip(*items))[0] + preds = list(zip(*items))[1] + refs, preds = _sacreformat(refs, preds) + return sacrebleu.corpus_ter(preds, refs).score + + @register_metric( metric="acc", higher_is_better=True, @@ -188,6 +219,26 @@ def bleu_fn(items): # This is a passthrough function return items +@register_metric( + metric="chrf", + higher_is_better=True, + output_type="greedy_until", + aggregation="chrf", +) +def chrf_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="ter", + higher_is_better=True, + output_type="greedy_until", + aggregation="ter", +) +def ter_fn(items): # This is a passthrough function + return items + + @register_metric( metric="acc_all", higher_is_better=True, @@ -245,37 +296,6 @@ def weighted_mean(items): return sum(a) / sum(b) -@register_metric(metric="chrf", higher_is_better=True, aggregation="mean") -def chrf(items): - """chrF++ is a tool for automatic evaluation of machine translation output - based on character n-gram precision and recall enhanced with word n-grams. 
-    Source: https://github.com/m-popovic/chrF
-    Paper: https://www.aclweb.org/anthology/W15-3049.pdf
-
-    Higher is better  # TODO I think
-    """
-    refs = list(zip(*items))[0]
-    preds = list(zip(*items))[1]
-    refs, preds = _sacreformat(refs, preds)
-    return sacrebleu.corpus_chrf(preds, refs).score
-
-
-@register_metric(metric="ter", higher_is_better=True, aggregation="mean")
-def ter(items):
-    """Translation Error Rate is an error metric for machine translation that
-    measures the number of edits required to change a system output into one
-    of the references
-    Source: http://www.cs.umd.edu/~snover/tercom/
-    Paper: http://mt-archive.info/AMTA-2006-Snover.pdf
-
-    Lower is better
-    """
-    refs = list(zip(*items))[0]
-    preds = list(zip(*items))[1]
-    refs, preds = _sacreformat(refs, preds)
-    return sacrebleu.corpus_ter(preds, refs).score
-
-
 def is_non_str_iterable(obj):
     return isinstance(obj, Iterable) and not isinstance(obj, str)

--
GitLab


From df6c5dcbe6127e39974d98bb20e55557fdac2d92 Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Sat, 12 Aug 2023 03:00:12 +0000
Subject: [PATCH 011/264] fix multi-metric greedy_until

---
 lm_eval/api/task.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py
index 95a11c9e..42b53ad6 100644
--- a/lm_eval/api/task.py
+++ b/lm_eval/api/task.py
@@ -78,7 +78,7 @@ class TaskConfig(dict):
     # runtime configuration options
     num_fewshot: int = 0
     # scoring options
-    metric_list: str = None
+    metric_list: list = None
     output_type: str = "greedy_until"
     generation_kwargs: dict = None
     repeats: int = 1
@@ -992,7 +992,8 @@ class ConfigurableTask(Task):
             choices = self.doc_to_choice(doc)
             gold = choices[gold]

-        for key, result in zip(self._metric_fn_list.keys(), results):
+        for key in self._metric_fn_list.keys():
+            result = results[0]
             if self.multiple_target:
                 # in the case where we have multiple targets,
                 # return true if any are true
--
GitLab


From 21aa92d26ffaf028976e25a7593abaee8f414d2e Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Tue, 15 Aug 2023 14:31:12 +0000
Subject: [PATCH 012/264] update squadv2

---
 lm_eval/tasks/squadv2/README.md    | 40 +++++++++++++++++++
 lm_eval/tasks/squadv2/default.yaml | 47 +++++++++++++++++++++++
 lm_eval/tasks/squadv2/metric.py    | 39 +++++++++++++++++++
 lm_eval/tasks/squadv2/utils.py     | 61 ++++++++++++++++++++++++++++++
 4 files changed, 187 insertions(+)
 create mode 100644 lm_eval/tasks/squadv2/README.md
 create mode 100644 lm_eval/tasks/squadv2/default.yaml
 create mode 100644 lm_eval/tasks/squadv2/metric.py
 create mode 100644 lm_eval/tasks/squadv2/utils.py

diff --git a/lm_eval/tasks/squadv2/README.md b/lm_eval/tasks/squadv2/README.md
new file mode 100644
index 00000000..c13bd21d
--- /dev/null
+++ b/lm_eval/tasks/squadv2/README.md
@@ -0,0 +1,40 @@
+# SQuAD v2
+
+### Paper
+
+Title: `Know What You Don't Know: Unanswerable Questions for SQuAD`
+Abstract: https://arxiv.org/abs/1806.03822
+
+SQuAD 2.0 combines the 100,000 questions of SQuAD 1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well, a system must both answer questions when possible and abstain when no answer is supported by the passage.
+
+Homepage: https://rajpurkar.github.io/SQuAD-explorer/
+
+
+### Citation
+
+```
+@inproceedings{rajpurkar-etal-2018-know,
+    title={Know What You Don't Know: Unanswerable Questions for SQuAD},
+    author={Rajpurkar, Pranav and Jia, Robin and Liang, Percy},
+    booktitle={Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
+    year={2018}
+}
+```
+
+### Subtasks
+
+Tasks defined in this folder:
+* `squadv2`: greedy generation of answers to SQuAD 2.0 questions (or "unanswerable"), scored with the official SQuAD v2 exact-match and F1 metrics.
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/squadv2/default.yaml b/lm_eval/tasks/squadv2/default.yaml new file mode 100644 index 00000000..ac0dbd9d --- /dev/null +++ b/lm_eval/tasks/squadv2/default.yaml @@ -0,0 +1,47 @@ +task: squadv2 +dataset_path: squad_v2 +output_type: greedy_until +training_split: train +validation_split: validation +doc_to_text: "Title: {{title}}\n\nBackground: {{context}}\n\nQuestion: {{question}}\n\n Answer:" +doc_to_target: "{% if answers.text| length > 0 %}{{answers.text}}{% else %}{{['unanswerable']}}{% endif %}" +target_delimiter: "" +should_decontaminate: true +doc_to_decontamination_query: context +process_results: !function utils.process_results +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +metric_list: + - metric: !function metric.exact + aggregation: mean + higher_is_better: true + - metric: !function metric.f1 + aggregation: mean + higher_is_better: true + - metric: !function metric.HasAns_exact + aggregation: mean + higher_is_better: true + - metric: !function metric.HasAns_f1 + aggregation: mean + higher_is_better: true + - metric: !function metric.NoAns_exact + aggregation: mean + higher_is_better: true + - metric: !function metric.NoAns_f1 + aggregation: mean + higher_is_better: true + - metric: !function metric.best_exact + aggregation: mean + higher_is_better: true + - metric: !function metric.best_f1 + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/squadv2/metric.py b/lm_eval/tasks/squadv2/metric.py new file mode 100644 index 00000000..5a181622 --- /dev/null +++ b/lm_eval/tasks/squadv2/metric.py @@ -0,0 +1,39 @@ +import evaluate +from functools import partial + + +def _squad_metric(predictions, references): + squad_metric = evaluate.load("squad_v2") + return squad_metric.compute(predictions=predictions, references=references) + +# Exact match (the normalized answer exactly match the gold answer) +def exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("exact", 0) + +# The F-score of predicted tokens versus the gold answer +def f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("f1", 0) + +# Exact match (the normalized answer exactly match the gold answer) +def HasAns_exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("HasAns_exact", 0) + +# The F-score of predicted tokens versus the gold answer +def HasAns_f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("HasAns_f1", 0) + +# Exact match (the normalized answer exactly match the gold answer) +def NoAns_exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("NoAns_exact", 0) + +# The F-score of predicted tokens versus the gold answer +def NoAns_f1(predictions, references): + return 
_squad_metric(predictions=predictions, references=references).get("NoAns_f1", 0) + +# Best exact match (with varying threshold) +def best_exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("best_exact", 0) + +# Best F1 (with varying threshold) +def best_f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("best_f1", 0) diff --git a/lm_eval/tasks/squadv2/utils.py b/lm_eval/tasks/squadv2/utils.py new file mode 100644 index 00000000..8147c15c --- /dev/null +++ b/lm_eval/tasks/squadv2/utils.py @@ -0,0 +1,61 @@ +import evaluate + +from math import exp +from functools import partial + + +def process_results(doc, results): + + continuation = results[0] + no_answer_probability = 0 # exp(logprob_unanswerable) + + predictions = { + "id": doc["id"], + "prediction_text": continuation, + "no_answer_probability": no_answer_probability, + } + + references = { + "id": doc["id"], + "answers": doc["answers"], + } + + print(_squad_metric(predictions, references)) + return _squad_metric(predictions, references) + + +def _squad_metric(predictions, references): + squad_metric = evaluate.load("squad_v2") + return squad_metric.compute(predictions=predictions, references=references) + +# Exact match (the normalized answer exactly match the gold answer) +def exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("exact", 0) + +# The F-score of predicted tokens versus the gold answer +def f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("f1", 0) + +# Exact match (the normalized answer exactly match the gold answer) +def HasAns_exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("HasAns_exact", 0) + +# The F-score of predicted tokens versus the gold answer +def HasAns_f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("HasAns_f1", 0) + +# Exact match (the normalized answer exactly match the gold answer) +def NoAns_exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("NoAns_exact", 0) + +# The F-score of predicted tokens versus the gold answer +def NoAns_f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("NoAns_f1", 0) + +# Best exact match (with varying threshold) +def best_exact(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("best_exact", 0) + +# Best F1 (with varying threshold) +def best_f1(predictions, references): + return _squad_metric(predictions=predictions, references=references).get("best_f1", 0) -- GitLab From 39bd5caf02cb0cd94c1c090baada2a7fcb924289 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 15 Aug 2023 14:52:34 +0000 Subject: [PATCH 013/264] make pycountry optional dep --- setup.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index cf8843ad..3e7d2f6c 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ extras_require = { ], "testing": ["pytest", "pytest-cov", "pytest-xdist"], "multilingual": ["nagisa>=0.2.7", "jieba>=0.42.1"], - "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1"], + "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1", "pycountry"], "promptsource": [ "promptsource @ 
git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource" ], @@ -62,10 +62,9 @@ setuptools.setup( "omegaconf>=2.2", "peft>=0.2.0", "pybind11>=2.6.2", - "pycountry", "pytablewriter", "rouge-score>=0.0.4", - "sacrebleu==1.5.0", + "sacrebleu>=1.5.0", "scikit-learn>=0.24.1", "sqlitedict", "torch>=1.8", -- GitLab From 40f81c42e5eb82adbfb389cd7d35f7e7031856d4 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 15 Aug 2023 14:57:36 +0000 Subject: [PATCH 014/264] add translation tasks --- lm_eval/tasks/translation/README.md | 39 ++++++ .../tasks/translation/iwslt2017_ar-en.yaml | 13 ++ .../tasks/translation/iwslt2017_en-ar.yaml | 13 ++ lm_eval/tasks/translation/utils.py | 117 ++++++++++++++++++ lm_eval/tasks/translation/wmt14_fr-en.yaml | 14 +++ lm_eval/tasks/translation/wmt16_de-en.yaml | 14 +++ lm_eval/tasks/translation/wmt16_ro-en.yaml | 14 +++ lm_eval/tasks/translation/wmt_common_yaml | 15 +++ 8 files changed, 239 insertions(+) create mode 100644 lm_eval/tasks/translation/README.md create mode 100644 lm_eval/tasks/translation/iwslt2017_ar-en.yaml create mode 100644 lm_eval/tasks/translation/iwslt2017_en-ar.yaml create mode 100644 lm_eval/tasks/translation/utils.py create mode 100644 lm_eval/tasks/translation/wmt14_fr-en.yaml create mode 100644 lm_eval/tasks/translation/wmt16_de-en.yaml create mode 100644 lm_eval/tasks/translation/wmt16_ro-en.yaml create mode 100644 lm_eval/tasks/translation/wmt_common_yaml diff --git a/lm_eval/tasks/translation/README.md b/lm_eval/tasks/translation/README.md new file mode 100644 index 00000000..7ce7046b --- /dev/null +++ b/lm_eval/tasks/translation/README.md @@ -0,0 +1,39 @@ +# Translation Tasks + +### Paper + + + +### Citation + +``` + +``` + +### Groups and Tasks + +#### Groups + +* `gpt3_translation_tasks` +* `wmt14` +* `wmt16` +* `wmt20` +* `iwslt17` + +#### Tasks + +* + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
+  * [ ] Checked for equivalence with v0.3.0 LM Evaluation Harness
diff --git a/lm_eval/tasks/translation/iwslt2017_ar-en.yaml b/lm_eval/tasks/translation/iwslt2017_ar-en.yaml
new file mode 100644
index 00000000..6f8b2e53
--- /dev/null
+++ b/lm_eval/tasks/translation/iwslt2017_ar-en.yaml
@@ -0,0 +1,13 @@
+# Generated by utils.py
+dataset_name: iwslt2017-ar-en
+dataset_path: iwslt2017
+doc_to_target: ' {{translation["en"]}}'
+doc_to_text: 'Arabic phrase: {{translation["ar"]}}
+
+  English phrase:'
+group:
+- greedy_until
+- translation
+- iwslt2017
+include: wmt_common_yaml
+task: iwslt2017-ar-en
diff --git a/lm_eval/tasks/translation/iwslt2017_en-ar.yaml b/lm_eval/tasks/translation/iwslt2017_en-ar.yaml
new file mode 100644
index 00000000..d3c0462a
--- /dev/null
+++ b/lm_eval/tasks/translation/iwslt2017_en-ar.yaml
@@ -0,0 +1,13 @@
+# Generated by utils.py
+dataset_name: iwslt2017-en-ar
+dataset_path: iwslt2017
+doc_to_target: ' {{translation["ar"]}}'
+doc_to_text: 'English phrase: {{translation["en"]}}
+
+  Arabic phrase:'
+group:
+- greedy_until
+- translation
+- iwslt2017
+include: wmt_common_yaml
+task: iwslt2017-en-ar
diff --git a/lm_eval/tasks/translation/utils.py b/lm_eval/tasks/translation/utils.py
new file mode 100644
index 00000000..68a1a834
--- /dev/null
+++ b/lm_eval/tasks/translation/utils.py
@@ -0,0 +1,117 @@
+import argparse
+from typing import Dict, List
+
+import yaml
+
+import sacrebleu
+
+try:
+    import pycountry
+except ModuleNotFoundError:
+    raise Exception(
+        "`pycountry` is required for generating translation task prompt templates. \
+please install pycountry via pip install lm-eval[multilingual] or pip install -e .[multilingual]",
+    )
+
+
+# Different translation benchmarks included in the library. Mostly WMT.
+# These correspond to dataset names (subsets) on HuggingFace for each dataset.
+# A yaml file is generated by this script for each language pair.
+
+gpt3_translation_benchmarks = {
+    "wmt14": ["fr-en"],  # ["en-fr", "fr-en"], # French
+    "wmt16": [
+        "ro-en",
+        "de-en",
+    ],  # ["en-ro", "ro-en", "de-en", "en-de"], # German, Romanian
+}
+
+# 28 total
+LANGUAGES = {
+    **gpt3_translation_benchmarks,
+    # "wmt20": sacrebleu.get_langpairs_for_testset("wmt20"),
+    "iwslt2017": ["en-ar", "ar-en"],  # Arabic
+}
+
+
+def code_to_language(code):
+    # key is alpha_2 or alpha_3 depending on the code length
+    language_tuple = pycountry.languages.get(**{f"alpha_{len(code)}": code})
+    return language_tuple.name
+
+
+def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
+    """
+    Generate a yaml file for each language.
+
+    :param output_dir: The directory to output the files to.
+    :param overwrite: Whether to overwrite files if they already exist.
+ """ + err = [] + for lang in LANGUAGES.keys(): + for lang_pair in LANGUAGES[lang]: + file_name = f"{lang}_{lang_pair}.yaml" + try: + src_lang, _, tgt_lang = lang_pair.partition("-") + source, target = code_to_language(src_lang), code_to_language(tgt_lang) + + groups = ["greedy_until", "translation", lang] + if lang in gpt3_translation_benchmarks.keys(): + groups += ["gpt3_translation_benchmarks"] + + with open( + f"{output_dir}/{file_name}", + "w" if overwrite else "x", + encoding="utf8", + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "wmt_common_yaml", + "group": groups, + "dataset_path": lang, + "dataset_name": lang_pair + if not (lang == "iwslt2017") + else "iwslt2017-" + lang_pair, + "task": f"{lang}-{lang_pair}", + "doc_to_text": f"{source} phrase: " + + "{{translation[" + + f'"{src_lang}"' + + "]}}\n" + + f"{target} phrase:", + "doc_to_target": " {{" + + "translation[" + + f'"{tgt_lang}"]' + + "}}", + }, + f, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm_eval/tasks/translation/wmt14_fr-en.yaml b/lm_eval/tasks/translation/wmt14_fr-en.yaml new file mode 100644 index 00000000..0a414359 --- /dev/null +++ b/lm_eval/tasks/translation/wmt14_fr-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'French phrase: {{translation["fr"]}} + + English phrase:' +group: +- greedy_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-fr-en diff --git a/lm_eval/tasks/translation/wmt16_de-en.yaml b/lm_eval/tasks/translation/wmt16_de-en.yaml new file mode 100644 index 00000000..b38d21b8 --- /dev/null +++ b/lm_eval/tasks/translation/wmt16_de-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'German phrase: {{translation["de"]}} + + English phrase:' +group: +- greedy_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-de-en diff --git a/lm_eval/tasks/translation/wmt16_ro-en.yaml b/lm_eval/tasks/translation/wmt16_ro-en.yaml new file mode 100644 index 00000000..14278794 --- /dev/null +++ b/lm_eval/tasks/translation/wmt16_ro-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Romanian phrase: {{translation["ro"]}} + + English phrase:' +group: +- greedy_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-ro-en diff --git a/lm_eval/tasks/translation/wmt_common_yaml b/lm_eval/tasks/translation/wmt_common_yaml new file mode 100644 index 00000000..5be7c978 --- /dev/null +++ b/lm_eval/tasks/translation/wmt_common_yaml @@ -0,0 +1,15 @@ +output_type: greedy_until +training_split: train 
+validation_split: validation +fewshot_split: validation +test_split: test +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 -- GitLab From fc329d319afc263fb6d600663944d3b24944f0de Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 15 Aug 2023 14:59:28 +0000 Subject: [PATCH 015/264] update --- lm_eval/tasks/squadv2/default.yaml | 48 +++++++++++++++--------------- lm_eval/tasks/squadv2/metric.py | 39 ------------------------ lm_eval/tasks/squadv2/utils.py | 29 +++++++++++------- 3 files changed, 42 insertions(+), 74 deletions(-) delete mode 100644 lm_eval/tasks/squadv2/metric.py diff --git a/lm_eval/tasks/squadv2/default.yaml b/lm_eval/tasks/squadv2/default.yaml index ac0dbd9d..c49d3fb8 100644 --- a/lm_eval/tasks/squadv2/default.yaml +++ b/lm_eval/tasks/squadv2/default.yaml @@ -21,27 +21,27 @@ filter_list: - function: remove_whitespace - function: take_first metric_list: - - metric: !function metric.exact - aggregation: mean - higher_is_better: true - - metric: !function metric.f1 - aggregation: mean - higher_is_better: true - - metric: !function metric.HasAns_exact - aggregation: mean - higher_is_better: true - - metric: !function metric.HasAns_f1 - aggregation: mean - higher_is_better: true - - metric: !function metric.NoAns_exact - aggregation: mean - higher_is_better: true - - metric: !function metric.NoAns_f1 - aggregation: mean - higher_is_better: true - - metric: !function metric.best_exact - aggregation: mean - higher_is_better: true - - metric: !function metric.best_f1 - aggregation: mean - higher_is_better: true + - metric: exact + aggregation: !function utils.exact + higher_is_better: true + # - metric: f1 + # aggregation: mean + # higher_is_better: true + # - metric: HasAns_exact + # aggregation: mean + # higher_is_better: true + # - metric: HasAns_f1 + # aggregation: mean + # higher_is_better: true + # - metric: NoAns_exact + # aggregation: mean + # higher_is_better: true + # - metric: NoAns_f1 + # aggregation: mean + # higher_is_better: true + # - metric: best_exact + # aggregation: mean + # higher_is_better: true + # - metric: best_f1 + # aggregation: mean + # higher_is_better: true diff --git a/lm_eval/tasks/squadv2/metric.py b/lm_eval/tasks/squadv2/metric.py deleted file mode 100644 index 5a181622..00000000 --- a/lm_eval/tasks/squadv2/metric.py +++ /dev/null @@ -1,39 +0,0 @@ -import evaluate -from functools import partial - - -def _squad_metric(predictions, references): - squad_metric = evaluate.load("squad_v2") - return squad_metric.compute(predictions=predictions, references=references) - -# Exact match (the normalized answer exactly match the gold answer) -def exact(predictions, references): - return _squad_metric(predictions=predictions, references=references).get("exact", 0) - -# The F-score of predicted tokens versus the gold answer -def f1(predictions, references): - return _squad_metric(predictions=predictions, references=references).get("f1", 0) - -# Exact match (the normalized answer exactly match the gold answer) -def HasAns_exact(predictions, references): - return _squad_metric(predictions=predictions, references=references).get("HasAns_exact", 0) - -# The F-score of predicted tokens versus the gold answer -def HasAns_f1(predictions, references): - return _squad_metric(predictions=predictions, references=references).get("HasAns_f1", 0) - -# Exact match (the normalized answer exactly match the gold answer) -def NoAns_exact(predictions, references): - 
return _squad_metric(predictions=predictions, references=references).get("NoAns_exact", 0)
-
-# The F-score of predicted tokens versus the gold answer
-def NoAns_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("NoAns_f1", 0)
-
-# Best exact match (with varying threshold)
-def best_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("best_exact", 0)
-
-# Best F1 (with varying threshold)
-def best_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("best_f1", 0)
diff --git a/lm_eval/tasks/squadv2/utils.py b/lm_eval/tasks/squadv2/utils.py
index 8147c15c..1a3ac0e8 100644
--- a/lm_eval/tasks/squadv2/utils.py
+++ b/lm_eval/tasks/squadv2/utils.py
@@ -20,8 +20,12 @@ def process_results(doc, results):
         "answers": doc["answers"],
     }

-    print(_squad_metric(predictions, references))
-    return _squad_metric(predictions, references)
+    return {
+        "predictions": predictions,
+        "reference": references
+    }
+    # return _squad_metric([predictions], [references])
+    # return {key: value for key, value in score.items() if key in metrics}


 def _squad_metric(predictions, references):
     squad_metric = evaluate.load("squad_v2")
     return squad_metric.compute(predictions=predictions, references=references)

 # Exact match (the normalized answer exactly match the gold answer)
-def exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("exact", 0)
+def exact(items):
+    predictions, references = zip(*items)
+    return _squad_metric(predictions=predictions, references=references)["exact"]

 # The F-score of predicted tokens versus the gold answer
 def f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("f1", 0)
+    return _squad_metric(predictions=predictions, references=references)["f1"]

 # Exact match (the normalized answer exactly match the gold answer)
 def HasAns_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("HasAns_exact", 0)
+    return _squad_metric(predictions=predictions, references=references)["HasAns_exact"]

 # The F-score of predicted tokens versus the gold answer
 def HasAns_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("HasAns_f1", 0)
+    return _squad_metric(predictions=predictions, references=references)["HasAns_f1"]

 # Exact match (the normalized answer exactly match the gold answer)
 def NoAns_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("NoAns_exact", 0)
+    return _squad_metric(predictions=predictions, references=references)["NoAns_exact"]

 # The F-score of predicted tokens versus the gold answer
 def NoAns_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("NoAns_f1", 0)
+    return _squad_metric(predictions=predictions, references=references)["NoAns_f1"]

 # Best exact match (with varying threshold)
 def best_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("best_exact", 0)
+    return _squad_metric(predictions=predictions, references=references)["best_exact"]

 # Best F1 (with varying threshold)
 def best_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references).get("best_f1", 0)
+    return
_squad_metric(predictions=predictions, references=references)["best_f1"] -- GitLab From 21aa92d26ffaf028976e25a7593abaee8f414d2e Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 15 Aug 2023 15:03:54 +0000 Subject: [PATCH 016/264] rename var --- lm_eval/api/task.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 42658d81..b8c9d6e9 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1011,34 +1011,36 @@ class ConfigurableTask(Task): scores = [] for gold_option in gold: try: - res = self._metric_fn_list[key]( + result_score = self._metric_fn_list[key]( references=[gold_option], predictions=[result], **self._metric_fn_kwargs[key], ) except TypeError: # TODO: this is hacky and I don't want to do it - result = self._metric_fn_list[key]([gold_option, result]) - if isinstance(res, dict): + result_score = self._metric_fn_list[key]( + [gold_option, result] + ) + if isinstance(result_score, dict): # TODO: this handles the case where HF evaluate returns a dict. - res = res[key] - scores.append(res) + result_score = result_score[key] + scores.append(result_score) if any(scores): - result = 1.0 + result_score = 1.0 else: - result = 0.0 + result_score = 0.0 else: try: - result = self._metric_fn_list[key]( + result_score = self._metric_fn_list[key]( references=[gold], predictions=[result], **self._metric_fn_kwargs[key], ) except TypeError: - result = self._metric_fn_list[key]([gold, result]) - if isinstance(result, dict): - result_dict.update(result) + result_score = self._metric_fn_list[key]([gold, result]) + if isinstance(result_score, dict): + result_dict.update(result_score) else: - result_dict[key] = result + result_dict[key] = result_score else: raise ValueError( f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ", -- GitLab From 416a3ca1cfb6f0287ceb080c492f5956217119cf Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 15 Aug 2023 17:10:27 +0000 Subject: [PATCH 017/264] add mgsm --- lm_eval/tasks/mgsm/README.md | 74 ++++++++++++++ lm_eval/tasks/mgsm/common_template_yaml | 22 +++++ lm_eval/tasks/mgsm/utils.py | 123 ++++++++++++++++++++++++ 3 files changed, 219 insertions(+) create mode 100644 lm_eval/tasks/mgsm/README.md create mode 100644 lm_eval/tasks/mgsm/common_template_yaml create mode 100644 lm_eval/tasks/mgsm/utils.py diff --git a/lm_eval/tasks/mgsm/README.md b/lm_eval/tasks/mgsm/README.md new file mode 100644 index 00000000..62b46c56 --- /dev/null +++ b/lm_eval/tasks/mgsm/README.md @@ -0,0 +1,74 @@ +# MGSM + +### Paper + +Title: `Language Models are Multilingual Chain-of-Thought Reasoners` + +Abstract: https://arxiv.org/abs/2210.03057 + +Multilingual Grade School Math Benchmark (MGSM) is a benchmark of grade-school math problems, proposed in the paper [Language models are multilingual chain-of-thought reasoners](http://arxiv.org/abs/2210.03057). + +The same 250 problems from [GSM8K](https://arxiv.org/abs/2110.14168) are each translated via human annotators in 10 languages. The 10 languages are: +- Spanish +- French +- German +- Russian +- Chinese +- Japanese +- Thai +- Swahili +- Bengali +- Telugu + +GSM8K (Grade School Math 8K) is a dataset of 8.5K high quality linguistically diverse grade school math word problems. The dataset was created to support the task of question answering on basic mathematical problems that require multi-step reasoning. + +You can find the input and targets for each of the ten languages (and English) as `.tsv` files. 
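As a quick illustration (a minimal sketch, not part of this patch: the `juletxara/mgsm` dataset path and the `question`/`answer_number` field names are the ones referenced by the task configs added later in this series), a record can be inspected with the Hugging Face `datasets` library:

```python
# Minimal sketch: peek at one MGSM record.
# Assumes the `juletxara/mgsm` dataset path used by the yaml configs below;
# adjust the path and language code for your setup.
from datasets import load_dataset

mgsm_es = load_dataset("juletxara/mgsm", "es", split="test")
sample = mgsm_es[0]
print(sample["question"])       # grade-school word problem, human-translated to Spanish
print(sample["answer_number"])  # numeric gold answer scored with exact_match
```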
+We also include few-shot exemplars that are also manually translated from each language in `exemplars.py`. + +Homepage: https://github.com/google-research/url-nlp/tree/main/mgsm + + +### Citation + +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +@misc{shi2022language, + title={Language Models are Multilingual Chain-of-Thought Reasoners}, + author={Freda Shi and Mirac Suzgun and Markus Freitag and Xuezhi Wang and Suraj Srivats and Soroush Vosoughi and Hyung Won Chung and Yi Tay and Sebastian Ruder and Denny Zhou and Dipanjan Das and Jason Wei}, + year={2022}, + eprint={2210.03057}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `mgsm` + +#### Tasks + +* `task_name`: `1-sentence description of what this particular task does` +* `task_name2`: ... + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? \ No newline at end of file diff --git a/lm_eval/tasks/mgsm/common_template_yaml b/lm_eval/tasks/mgsm/common_template_yaml new file mode 100644 index 00000000..e2bc953e --- /dev/null +++ b/lm_eval/tasks/mgsm/common_template_yaml @@ -0,0 +1,22 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. 
+output_type: greedy_until +training_split: train +test_split: test +target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py new file mode 100644 index 00000000..0cc94257 --- /dev/null +++ b/lm_eval/tasks/mgsm/utils.py @@ -0,0 +1,123 @@ +import argparse +from typing import Dict, List + +import yaml + + +LANGUAGES = { + "bn": { # Bengali + "QUESTION": "\u09aa\u09cd\u09b0\u09b6\u09cd\u09a8:", + "ANSWER": "\u09a7\u09be\u09aa\u09c7 \u09a7\u09be\u09aa\u09c7 \u0989\u09a4\u09cd\u09a4\u09b0:", + }, + "de": { # German + "QUESTION": "Frage:", + "ANSWER": "Schritt-f\u00fcr-Schritt-Antwort:", + }, + "en": { # English + "QUESTION": "Question:", + "ANSWER": "Step-by-Step Answer:", + }, + "es": { # Spanish + "QUESTION": "Pregunta:", + "ANSWER": "Respuesta paso a paso:", + }, + "fr": { # French + "QUESTION": "Question :", + "ANSWER": "R\u00e9ponse \u00e9tape par \u00e9tape :", + }, + "ru": { # Russian + "QUESTION": "\u0417\u0430\u0434\u0430\u0447\u0430:", + "ANSWER": "\u041f\u043e\u0448\u0430\u0433\u043e\u0432\u043e\u0435\u0440\u0435\u0448\u0435\u043d\u0438\u0435:", + }, + "sw": { # Swahili + "QUESTION": "Swali:", + "ANSWER": "Jibu la Hatua kwa Hatua:", + }, + "te": { # Telugu + "QUESTION": "\u0c2a\u0c4d\u0c30\u0c36\u0c4d\u0c28:", + "ANSWER": "\u0c26\u0c36\u0c32\u0c35\u0c3e\u0c30\u0c40\u0c17\u0c3e \u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02:", + }, + "th": { # Thai + "QUESTION": "\u0e42\u0e08\u0e17\u0e22\u0e4c:", + "ANSWER": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e17\u0e35\u0e25\u0e30\u0e02\u0e31\u0e49\u0e19\u0e15\u0e2d\u0e19:", + }, + "ja": { # Japanese + "QUESTION": "\u554f\u984c:", + "ANSWER": "\u30b9\u30c6\u30c3\u30d7\u3054\u3068\u306e\u7b54\u3048:", + }, + "zh": { # Chinese + "QUESTION": "\u95ee\u9898:", + "ANSWER": "\u9010\u6b65\u89e3\u7b54:", + }, +} + +def doc_to_text(doc, QUESTION, ANSWER): + if doc["answer"] is not None: + return doc["question"] + "\n" + ANSWER + else: + return QUESTION + " " + doc["question"] + "\n" + ANSWER + +def doc_to_target(doc, QUESTION, ANSWER): + if doc["answer"] is not None: + return " " + doc["answer"][len(ANSWER) + 1 :] + else: + return " " + str(doc["answer_number"]) + + +def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. 
+ """ + err = [] + for lang in LANGUAGES.keys(): + file_name = f"mgsm_{lang}.yaml" + try: + QUESTION = LANGUAGES[lang]["QUESTION"] + ANSWER = LANGUAGES[lang]["ANSWER"] + with open( + f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "common_template_yaml", + "dataset_name": lang, + "task": f"mgsm_{lang}", + "doc_to_text": doc_to_text(doc, QUESTION, ANSWER), + "doc_to_target": doc_to_target(doc, QUESTION, ANSWER), + }, + f, + allow_unicode=True, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() -- GitLab From 3e9c9a950cb8dec463d000249be11ff5478bf680 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 16 Aug 2023 02:42:39 +0000 Subject: [PATCH 018/264] add mgsm yamls --- lm_eval/tasks/mgsm/common_template_yaml | 6 ++++++ lm_eval/tasks/mgsm/mgsm_bn.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_de.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_en.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_es.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_fr.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_ja.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_ru.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_sw.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_te.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_th.yaml | 8 +++++++ lm_eval/tasks/mgsm/mgsm_zh.yaml | 8 +++++++ lm_eval/tasks/mgsm/utils.py | 28 ++++++++++--------------- 13 files changed, 105 insertions(+), 17 deletions(-) create mode 100644 lm_eval/tasks/mgsm/mgsm_bn.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_de.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_en.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_es.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_fr.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_ja.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_ru.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_sw.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_te.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_th.yaml create mode 100644 lm_eval/tasks/mgsm/mgsm_zh.yaml diff --git a/lm_eval/tasks/mgsm/common_template_yaml b/lm_eval/tasks/mgsm/common_template_yaml index e2bc953e..470cd255 100644 --- a/lm_eval/tasks/mgsm/common_template_yaml +++ b/lm_eval/tasks/mgsm/common_template_yaml @@ -14,6 +14,12 @@ generation_kwargs: - "\n" do_sample: false temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/mgsm/mgsm_bn.yaml b/lm_eval/tasks/mgsm/mgsm_bn.yaml new file mode 100644 index 00000000..2130cca8 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_bn.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: bn +doc_to_target: '{% if answer is not none %}{{answer[16+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nধাপে ধাপে উত্তর:"}}{% 
else + %}{{"প্রশ্ন: "+question+"\nধাপে ধাপে উত্তর:"}}{% endif %}' +include: common_template_yaml +task: mgsm_bn diff --git a/lm_eval/tasks/mgsm/mgsm_de.yaml b/lm_eval/tasks/mgsm/mgsm_de.yaml new file mode 100644 index 00000000..8ae4c55b --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_de.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[28+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% + else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' +include: common_template_yaml +task: mgsm_de diff --git a/lm_eval/tasks/mgsm/mgsm_en.yaml b/lm_eval/tasks/mgsm/mgsm_en.yaml new file mode 100644 index 00000000..75bb850b --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_en.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else + %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: common_template_yaml +task: mgsm_en diff --git a/lm_eval/tasks/mgsm/mgsm_es.yaml b/lm_eval/tasks/mgsm/mgsm_es.yaml new file mode 100644 index 00000000..c158ccc7 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_es.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[22+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta paso a paso:"}}{% + else %}{{"Pregunta: "+question+"\nRespuesta paso a paso:"}}{% endif %}' +include: common_template_yaml +task: mgsm_es diff --git a/lm_eval/tasks/mgsm/mgsm_fr.yaml b/lm_eval/tasks/mgsm/mgsm_fr.yaml new file mode 100644 index 00000000..f41c9a95 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_fr.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[25+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nRéponse étape par étape :"}}{% + else %}{{"Question : "+question+"\nRéponse étape par étape :"}}{% endif %}' +include: common_template_yaml +task: mgsm_fr diff --git a/lm_eval/tasks/mgsm/mgsm_ja.yaml b/lm_eval/tasks/mgsm/mgsm_ja.yaml new file mode 100644 index 00000000..e2a5ff7b --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_ja.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[10+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: + "+question+"\nステップごとの答え:"}}{% endif %}' +include: common_template_yaml +task: mgsm_ja diff --git a/lm_eval/tasks/mgsm/mgsm_ru.yaml b/lm_eval/tasks/mgsm/mgsm_ru.yaml new file mode 100644 index 00000000..fe497d64 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_ru.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nПошаговоерешение:"}}{% else + %}{{"Задача: "+question+"\nПошаговоерешение:"}}{% endif %}' +include: common_template_yaml +task: mgsm_ru diff --git a/lm_eval/tasks/mgsm/mgsm_sw.yaml b/lm_eval/tasks/mgsm/mgsm_sw.yaml new file mode 100644 index 00000000..5eaf1a41 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_sw.yaml 
@@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[24+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% + else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' +include: common_template_yaml +task: mgsm_sw diff --git a/lm_eval/tasks/mgsm/mgsm_te.yaml b/lm_eval/tasks/mgsm/mgsm_te.yaml new file mode 100644 index 00000000..c93049aa --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_te.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[18+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else + %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' +include: common_template_yaml +task: mgsm_te diff --git a/lm_eval/tasks/mgsm/mgsm_th.yaml b/lm_eval/tasks/mgsm/mgsm_th.yaml new file mode 100644 index 00000000..1f651ab4 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_th.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else + %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' +include: common_template_yaml +task: mgsm_th diff --git a/lm_eval/tasks/mgsm/mgsm_zh.yaml b/lm_eval/tasks/mgsm/mgsm_zh.yaml new file mode 100644 index 00000000..327e6753 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_zh.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[5+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% + endif %}' +include: common_template_yaml +task: mgsm_zh diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py index 0cc94257..17620c07 100644 --- a/lm_eval/tasks/mgsm/utils.py +++ b/lm_eval/tasks/mgsm/utils.py @@ -1,7 +1,5 @@ -import argparse -from typing import Dict, List - import yaml +import argparse LANGUAGES = { @@ -51,18 +49,6 @@ LANGUAGES = { }, } -def doc_to_text(doc, QUESTION, ANSWER): - if doc["answer"] is not None: - return doc["question"] + "\n" + ANSWER - else: - return QUESTION + " " + doc["question"] + "\n" + ANSWER - -def doc_to_target(doc, QUESTION, ANSWER): - if doc["answer"] is not None: - return " " + doc["answer"][len(ANSWER) + 1 :] - else: - return " " + str(doc["answer_number"]) - def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: """ @@ -86,8 +72,16 @@ def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: "include": "common_template_yaml", "dataset_name": lang, "task": f"mgsm_{lang}", - "doc_to_text": doc_to_text(doc, QUESTION, ANSWER), - "doc_to_target": doc_to_target(doc, QUESTION, ANSWER), + "doc_to_text": f"""{{% if answer is not none %}}""" \ + f"""{{{{question+"\\n{ANSWER}"}}}}""" \ + f"""{{% else %}}""" \ + f"""{{{{"{QUESTION} "+question+"\\n{ANSWER}"}}}}""" \ + f"""{{% endif %}}""", + "doc_to_target": f"""{{% if answer is not none %}}""" \ + f"""{{{{answer[{len(ANSWER)}+1]}}}}""" \ + f"""{{% else %}}""" \ + f"""{{{{answer_number|string}}}}""" \ + f"""{{% endif %}}""", }, f, allow_unicode=True, -- GitLab From d99e6cf4f9e6734932b5f13c501bfc6fb9c566e1 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 16 Aug 2023 05:54:37 +0000 Subject: [PATCH 019/264] fix 
doc_to_choice being called when target is digit --- lm_eval/api/task.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 8dbbb2d5..8838e432 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1002,9 +1002,13 @@ class ConfigurableTask(Task): elif self.OUTPUT_TYPE == "greedy_until": gold = self.doc_to_target(doc) - if type(gold) == int: + if self._config.doc_to_choice is not None: + # If you set doc_to_choice, + # it assumes that doc_to_target returns a number. choices = self.doc_to_choice(doc) gold = choices[gold] + else: + gold = str(gold) for key, result in zip(self._metric_fn_list.keys(), results): if self.multiple_target: -- GitLab From 83f95961e4991493b83727581a6306b9a804bfa7 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 16 Aug 2023 05:54:51 +0000 Subject: [PATCH 020/264] add task variants --- lm_eval/tasks/mgsm/cot_yaml | 23 ++++++ .../{common_template_yaml => direct_yaml} | 1 - lm_eval/tasks/mgsm/mgsm_bn_direct.yaml | 8 ++ .../{mgsm_bn.yaml => mgsm_bn_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_de_direct.yaml | 8 ++ .../{mgsm_de.yaml => mgsm_de_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_en_direct.yaml | 8 ++ .../{mgsm_en.yaml => mgsm_en_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_es_direct.yaml | 8 ++ .../{mgsm_es.yaml => mgsm_es_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_fr_direct.yaml | 8 ++ .../{mgsm_fr.yaml => mgsm_fr_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_ja_direct.yaml | 8 ++ .../{mgsm_ja.yaml => mgsm_ja_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_ru_direct.yaml | 8 ++ .../{mgsm_ru.yaml => mgsm_ru_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_sw_direct.yaml | 8 ++ .../{mgsm_sw.yaml => mgsm_sw_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_te_direct.yaml | 8 ++ .../{mgsm_te.yaml => mgsm_te_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_th_direct.yaml | 8 ++ .../{mgsm_th.yaml => mgsm_th_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/mgsm_zh_direct.yaml | 8 ++ .../{mgsm_zh.yaml => mgsm_zh_native-cot.yaml} | 10 ++- lm_eval/tasks/mgsm/utils.py | 76 +++++++++++++++++-- 25 files changed, 269 insertions(+), 29 deletions(-) create mode 100644 lm_eval/tasks/mgsm/cot_yaml rename lm_eval/tasks/mgsm/{common_template_yaml => direct_yaml} (97%) create mode 100644 lm_eval/tasks/mgsm/mgsm_bn_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_bn.yaml => mgsm_bn_native-cot.yaml} (68%) create mode 100644 lm_eval/tasks/mgsm/mgsm_de_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_de.yaml => mgsm_de_native-cot.yaml} (65%) create mode 100644 lm_eval/tasks/mgsm/mgsm_en_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_en.yaml => mgsm_en_native-cot.yaml} (64%) create mode 100644 lm_eval/tasks/mgsm/mgsm_es_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_es.yaml => mgsm_es_native-cot.yaml} (64%) create mode 100644 lm_eval/tasks/mgsm/mgsm_fr_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_fr.yaml => mgsm_fr_native-cot.yaml} (65%) create mode 100644 lm_eval/tasks/mgsm/mgsm_ja_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_ja.yaml => mgsm_ja_native-cot.yaml} (65%) create mode 100644 lm_eval/tasks/mgsm/mgsm_ru_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_ru.yaml => mgsm_ru_native-cot.yaml} (66%) create mode 100644 lm_eval/tasks/mgsm/mgsm_sw_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_sw.yaml => mgsm_sw_native-cot.yaml} (64%) create mode 100644 lm_eval/tasks/mgsm/mgsm_te_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_te.yaml => mgsm_te_native-cot.yaml} (69%) create mode 100644 
lm_eval/tasks/mgsm/mgsm_th_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_th.yaml => mgsm_th_native-cot.yaml} (68%) create mode 100644 lm_eval/tasks/mgsm/mgsm_zh_direct.yaml rename lm_eval/tasks/mgsm/{mgsm_zh.yaml => mgsm_zh_native-cot.yaml} (63%) diff --git a/lm_eval/tasks/mgsm/cot_yaml b/lm_eval/tasks/mgsm/cot_yaml new file mode 100644 index 00000000..a1def277 --- /dev/null +++ b/lm_eval/tasks/mgsm/cot_yaml @@ -0,0 +1,23 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. +output_type: greedy_until +training_split: train +test_split: test +target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true diff --git a/lm_eval/tasks/mgsm/common_template_yaml b/lm_eval/tasks/mgsm/direct_yaml similarity index 97% rename from lm_eval/tasks/mgsm/common_template_yaml rename to lm_eval/tasks/mgsm/direct_yaml index 470cd255..17649d2d 100644 --- a/lm_eval/tasks/mgsm/common_template_yaml +++ b/lm_eval/tasks/mgsm/direct_yaml @@ -19,7 +19,6 @@ filter_list: filter: - function: remove_whitespace - function: take_first -target_delimiter: " " metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/mgsm/mgsm_bn_direct.yaml b/lm_eval/tasks/mgsm/mgsm_bn_direct.yaml new file mode 100644 index 00000000..87622a3a --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_bn_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: bn +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"প্রশ্ন: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_bn_direct diff --git a/lm_eval/tasks/mgsm/mgsm_bn.yaml b/lm_eval/tasks/mgsm/mgsm_bn_native-cot.yaml similarity index 68% rename from lm_eval/tasks/mgsm/mgsm_bn.yaml rename to lm_eval/tasks/mgsm/mgsm_bn_native-cot.yaml index 2130cca8..ce050085 100644 --- a/lm_eval/tasks/mgsm/mgsm_bn.yaml +++ b/lm_eval/tasks/mgsm/mgsm_bn_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[16+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nধাপে ধাপে উত্তর:"}}{% else %}{{"প্রশ্ন: "+question+"\nধাপে ধাপে উত্তর:"}}{% endif %}' -include: common_template_yaml -task: mgsm_bn +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_bn_direct diff --git a/lm_eval/tasks/mgsm/mgsm_de_direct.yaml b/lm_eval/tasks/mgsm/mgsm_de_direct.yaml new file mode 100644 index 00000000..97b6a92f --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_de_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[7+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAntwort"}}{% else %}{{"Frage: + "+question+"\nAntwort"}}{% endif %}' +include: direct_yaml +task: mgsm_de_direct diff --git a/lm_eval/tasks/mgsm/mgsm_de.yaml b/lm_eval/tasks/mgsm/mgsm_de_native-cot.yaml similarity index 65% rename from lm_eval/tasks/mgsm/mgsm_de.yaml rename to 
lm_eval/tasks/mgsm/mgsm_de_native-cot.yaml index 8ae4c55b..ff8d789b 100644 --- a/lm_eval/tasks/mgsm/mgsm_de.yaml +++ b/lm_eval/tasks/mgsm/mgsm_de_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[28+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' -include: common_template_yaml -task: mgsm_de +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_de_direct diff --git a/lm_eval/tasks/mgsm/mgsm_en_direct.yaml b/lm_eval/tasks/mgsm/mgsm_en_direct.yaml new file mode 100644 index 00000000..f0ff6c9d --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_en_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Question: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_en_direct diff --git a/lm_eval/tasks/mgsm/mgsm_en.yaml b/lm_eval/tasks/mgsm/mgsm_en_native-cot.yaml similarity index 64% rename from lm_eval/tasks/mgsm/mgsm_en.yaml rename to lm_eval/tasks/mgsm/mgsm_en_native-cot.yaml index 75bb850b..9174cc90 100644 --- a/lm_eval/tasks/mgsm/mgsm_en.yaml +++ b/lm_eval/tasks/mgsm/mgsm_en_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' -include: common_template_yaml -task: mgsm_en +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_en_direct diff --git a/lm_eval/tasks/mgsm/mgsm_es_direct.yaml b/lm_eval/tasks/mgsm/mgsm_es_direct.yaml new file mode 100644 index 00000000..5430016e --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_es_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Pregunta: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_es_direct diff --git a/lm_eval/tasks/mgsm/mgsm_es.yaml b/lm_eval/tasks/mgsm/mgsm_es_native-cot.yaml similarity index 64% rename from lm_eval/tasks/mgsm/mgsm_es.yaml rename to lm_eval/tasks/mgsm/mgsm_es_native-cot.yaml index c158ccc7..ed223378 100644 --- a/lm_eval/tasks/mgsm/mgsm_es.yaml +++ b/lm_eval/tasks/mgsm/mgsm_es_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[22+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta paso a paso:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta paso a paso:"}}{% endif %}' -include: common_template_yaml -task: mgsm_es +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_es_direct diff --git a/lm_eval/tasks/mgsm/mgsm_fr_direct.yaml b/lm_eval/tasks/mgsm/mgsm_fr_direct.yaml new file mode 100644 index 00000000..b5e0f924 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_fr_direct.yaml @@ -0,0 +1,8 @@ 
+# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Question + : "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_fr_direct diff --git a/lm_eval/tasks/mgsm/mgsm_fr.yaml b/lm_eval/tasks/mgsm/mgsm_fr_native-cot.yaml similarity index 65% rename from lm_eval/tasks/mgsm/mgsm_fr.yaml rename to lm_eval/tasks/mgsm/mgsm_fr_native-cot.yaml index f41c9a95..4aa4a252 100644 --- a/lm_eval/tasks/mgsm/mgsm_fr.yaml +++ b/lm_eval/tasks/mgsm/mgsm_fr_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[25+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nRéponse étape par étape :"}}{% else %}{{"Question : "+question+"\nRéponse étape par étape :"}}{% endif %}' -include: common_template_yaml -task: mgsm_fr +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_fr_direct diff --git a/lm_eval/tasks/mgsm/mgsm_ja_direct.yaml b/lm_eval/tasks/mgsm/mgsm_ja_direct.yaml new file mode 100644 index 00000000..ef9d862c --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_ja_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"問題: "+question+"\nAnswer"}}{% + endif %}' +include: direct_yaml +task: mgsm_ja_direct diff --git a/lm_eval/tasks/mgsm/mgsm_ja.yaml b/lm_eval/tasks/mgsm/mgsm_ja_native-cot.yaml similarity index 65% rename from lm_eval/tasks/mgsm/mgsm_ja.yaml rename to lm_eval/tasks/mgsm/mgsm_ja_native-cot.yaml index e2a5ff7b..282c1f4c 100644 --- a/lm_eval/tasks/mgsm/mgsm_ja.yaml +++ b/lm_eval/tasks/mgsm/mgsm_ja_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[10+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: "+question+"\nステップごとの答え:"}}{% endif %}' -include: common_template_yaml -task: mgsm_ja +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_ja_direct diff --git a/lm_eval/tasks/mgsm/mgsm_ru_direct.yaml b/lm_eval/tasks/mgsm/mgsm_ru_direct.yaml new file mode 100644 index 00000000..fe581069 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_ru_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Задача: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_ru_direct diff --git a/lm_eval/tasks/mgsm/mgsm_ru.yaml b/lm_eval/tasks/mgsm/mgsm_ru_native-cot.yaml similarity index 66% rename from lm_eval/tasks/mgsm/mgsm_ru.yaml rename to lm_eval/tasks/mgsm/mgsm_ru_native-cot.yaml index fe497d64..b5fd4128 100644 --- a/lm_eval/tasks/mgsm/mgsm_ru.yaml +++ b/lm_eval/tasks/mgsm/mgsm_ru_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nПошаговоерешение:"}}{% else %}{{"Задача: "+question+"\nПошаговоерешение:"}}{% endif 
%}' -include: common_template_yaml -task: mgsm_ru +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_ru_direct diff --git a/lm_eval/tasks/mgsm/mgsm_sw_direct.yaml b/lm_eval/tasks/mgsm/mgsm_sw_direct.yaml new file mode 100644 index 00000000..d19052e3 --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_sw_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Swali: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_sw_direct diff --git a/lm_eval/tasks/mgsm/mgsm_sw.yaml b/lm_eval/tasks/mgsm/mgsm_sw_native-cot.yaml similarity index 64% rename from lm_eval/tasks/mgsm/mgsm_sw.yaml rename to lm_eval/tasks/mgsm/mgsm_sw_native-cot.yaml index 5eaf1a41..63655f65 100644 --- a/lm_eval/tasks/mgsm/mgsm_sw.yaml +++ b/lm_eval/tasks/mgsm/mgsm_sw_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[24+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' -include: common_template_yaml -task: mgsm_sw +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_sw_direct diff --git a/lm_eval/tasks/mgsm/mgsm_te_direct.yaml b/lm_eval/tasks/mgsm/mgsm_te_direct.yaml new file mode 100644 index 00000000..0b8250ca --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_te_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"ప్రశ్న: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_te_direct diff --git a/lm_eval/tasks/mgsm/mgsm_te.yaml b/lm_eval/tasks/mgsm/mgsm_te_native-cot.yaml similarity index 69% rename from lm_eval/tasks/mgsm/mgsm_te.yaml rename to lm_eval/tasks/mgsm/mgsm_te_native-cot.yaml index c93049aa..08bb1dd3 100644 --- a/lm_eval/tasks/mgsm/mgsm_te.yaml +++ b/lm_eval/tasks/mgsm/mgsm_te_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[18+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' -include: common_template_yaml -task: mgsm_te +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_te_direct diff --git a/lm_eval/tasks/mgsm/mgsm_th_direct.yaml b/lm_eval/tasks/mgsm/mgsm_th_direct.yaml new file mode 100644 index 00000000..275d1c4d --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_th_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"โจทย์: + "+question+"\nAnswer"}}{% endif %}' +include: direct_yaml +task: mgsm_th_direct diff --git a/lm_eval/tasks/mgsm/mgsm_th.yaml b/lm_eval/tasks/mgsm/mgsm_th_native-cot.yaml similarity index 68% rename from 
lm_eval/tasks/mgsm/mgsm_th.yaml rename to lm_eval/tasks/mgsm/mgsm_th_native-cot.yaml index 1f651ab4..d9b8c9da 100644 --- a/lm_eval/tasks/mgsm/mgsm_th.yaml +++ b/lm_eval/tasks/mgsm/mgsm_th_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' -include: common_template_yaml -task: mgsm_th +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_th_direct diff --git a/lm_eval/tasks/mgsm/mgsm_zh_direct.yaml b/lm_eval/tasks/mgsm/mgsm_zh_direct.yaml new file mode 100644 index 00000000..0b1ab79f --- /dev/null +++ b/lm_eval/tasks/mgsm/mgsm_zh_direct.yaml @@ -0,0 +1,8 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_number|string}}{% + endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"问题: "+question+"\nAnswer"}}{% + endif %}' +include: direct_yaml +task: mgsm_zh_direct diff --git a/lm_eval/tasks/mgsm/mgsm_zh.yaml b/lm_eval/tasks/mgsm/mgsm_zh_native-cot.yaml similarity index 63% rename from lm_eval/tasks/mgsm/mgsm_zh.yaml rename to lm_eval/tasks/mgsm/mgsm_zh_native-cot.yaml index 327e6753..65678633 100644 --- a/lm_eval/tasks/mgsm/mgsm_zh.yaml +++ b/lm_eval/tasks/mgsm/mgsm_zh_native-cot.yaml @@ -4,5 +4,11 @@ doc_to_target: '{% if answer is not none %}{{answer[5+1]}}{% else %}{{answer_num endif %}' doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% endif %}' -include: common_template_yaml -task: mgsm_zh +filter: +- function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) +- function: take_first +filter_list: +- name: get-answer +include: cot_yaml +task: mgsm_zh_direct diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py index 17620c07..0517f048 100644 --- a/lm_eval/tasks/mgsm/utils.py +++ b/lm_eval/tasks/mgsm/utils.py @@ -6,51 +6,94 @@ LANGUAGES = { "bn": { # Bengali "QUESTION": "\u09aa\u09cd\u09b0\u09b6\u09cd\u09a8:", "ANSWER": "\u09a7\u09be\u09aa\u09c7 \u09a7\u09be\u09aa\u09c7 \u0989\u09a4\u09cd\u09a4\u09b0:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "de": { # German "QUESTION": "Frage:", "ANSWER": "Schritt-f\u00fcr-Schritt-Antwort:", + "DIRECT": "Antwort:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "en": { # English "QUESTION": "Question:", "ANSWER": "Step-by-Step Answer:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "es": { # Spanish "QUESTION": "Pregunta:", "ANSWER": "Respuesta paso a paso:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "fr": { # French "QUESTION": "Question :", "ANSWER": "R\u00e9ponse \u00e9tape par \u00e9tape :", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "ru": { # Russian "QUESTION": "\u0417\u0430\u0434\u0430\u0447\u0430:", "ANSWER": "\u041f\u043e\u0448\u0430\u0433\u043e\u0432\u043e\u0435\u0440\u0435\u0448\u0435\u043d\u0438\u0435:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "sw": { # Swahili "QUESTION": "Swali:", "ANSWER": "Jibu la Hatua kwa Hatua:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "te": { # Telugu "QUESTION": "\u0c2a\u0c4d\u0c30\u0c36\u0c4d\u0c28:", "ANSWER": 
"\u0c26\u0c36\u0c32\u0c35\u0c3e\u0c30\u0c40\u0c17\u0c3e \u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "th": { # Thai "QUESTION": "\u0e42\u0e08\u0e17\u0e22\u0e4c:", "ANSWER": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e17\u0e35\u0e25\u0e30\u0e02\u0e31\u0e49\u0e19\u0e15\u0e2d\u0e19:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "ja": { # Japanese "QUESTION": "\u554f\u984c:", "ANSWER": "\u30b9\u30c6\u30c3\u30d7\u3054\u3068\u306e\u7b54\u3048:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "zh": { # Chinese "QUESTION": "\u95ee\u9898:", "ANSWER": "\u9010\u6b65\u89e3\u7b54:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, } +def add_regex_pattern(regex_pattern): -def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + if regex_pattern is None: + return {} + return { + "filter_list": [ + { + "name": "get-answer", + }, + ], + "filter": [ + { + "function": "regex", + "regex_pattern": regex_pattern, + }, + { + "function": "take_first", + }, + ], + } + + +def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: """ Generate a yaml file for each language. @@ -59,19 +102,36 @@ def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: """ err = [] for lang in LANGUAGES.keys(): - file_name = f"mgsm_{lang}.yaml" try: QUESTION = LANGUAGES[lang]["QUESTION"] - ANSWER = LANGUAGES[lang]["ANSWER"] + + yaml_template = "cot_yaml" + if mode == "direct": + ANSWER = LANGUAGES[lang]["DIRECT"] + REGEX = None + task_name = f"mgsm_{lang}_direct" + yaml_template = "direct_yaml" + elif mode == "native-cot": + ANSWER = LANGUAGES[lang]["ANSWER"] + REGEX = LANGUAGES[lang]["REGEX"] + task_name = f"mgsm_{lang}_native-cot" + elif model == "en-cot": + ANSWER = LANGUAGES["en"]["ANSWER"] + REGEX = LANGUAGES["en"]["REGEX"] + task_name = f"mgsm_{lang}_en-cot" + + file_name = f"{file_name}.yaml" + filter_list = add_regex_pattern(REGEX) + with open( f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" ) as f: f.write("# Generated by utils.py\n") yaml.dump( { - "include": "common_template_yaml", + "include": yaml_template, "dataset_name": lang, - "task": f"mgsm_{lang}", + "task": f"mgsm_{lang}_direct", "doc_to_text": f"""{{% if answer is not none %}}""" \ f"""{{{{question+"\\n{ANSWER}"}}}}""" \ f"""{{% else %}}""" \ @@ -82,6 +142,7 @@ def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: f"""{{% else %}}""" \ f"""{{{{answer_number|string}}}}""" \ f"""{{% endif %}}""", + **filter_list }, f, allow_unicode=True, @@ -108,9 +169,12 @@ def main() -> None: parser.add_argument( "--output-dir", default=".", help="Directory to write yaml files to" ) + parser.add_argument( + "--mode", default="native-cot", choices=["direct", "native-cot", "en-cot"], help="Mode of chain-of-thought" + ) args = parser.parse_args() - gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite, mode=args.mode) if __name__ == "__main__": -- GitLab From 5a973941924304d1ce4c549ae8a46cae72b876c7 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 16 Aug 2023 16:00:34 +0000 Subject: [PATCH 021/264] reformat --- lm_eval/api/task.py | 2 +- lm_eval/tasks/mgsm/README.md | 2 +- lm_eval/tasks/mgsm/utils.py | 58 +++++++++++++++++++----------------- 3 files changed, 33 insertions(+), 29 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 7e940367..9d3a903e 100644 --- 
a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1010,7 +1010,7 @@ class ConfigurableTask(Task): gold = self.doc_to_target(doc) if self._config.doc_to_choice is not None: - # If you set doc_to_choice, + # If you set doc_to_choice, # it assumes that doc_to_target returns a number. choices = self.doc_to_choice(doc) gold = choices[gold] diff --git a/lm_eval/tasks/mgsm/README.md b/lm_eval/tasks/mgsm/README.md index 62b46c56..475b55c3 100644 --- a/lm_eval/tasks/mgsm/README.md +++ b/lm_eval/tasks/mgsm/README.md @@ -71,4 +71,4 @@ For adding novel benchmarks/datasets to the library: If other tasks on this dataset are already supported: * [ ] Is the "Main" variant of this task clearly denoted? * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? -* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? \ No newline at end of file +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py index 0517f048..05071523 100644 --- a/lm_eval/tasks/mgsm/utils.py +++ b/lm_eval/tasks/mgsm/utils.py @@ -71,26 +71,27 @@ LANGUAGES = { }, } + def add_regex_pattern(regex_pattern): if regex_pattern is None: return {} return { - "filter_list": [ - { - "name": "get-answer", - }, - ], - "filter": [ - { - "function": "regex", - "regex_pattern": regex_pattern, - }, - { - "function": "take_first", - }, - ], - } + "filter_list": [ + { + "name": "get-answer", + }, + ], + "filter": [ + { + "function": "regex", + "regex_pattern": regex_pattern, + }, + { + "function": "take_first", + }, + ], + } def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: @@ -132,17 +133,17 @@ def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: "include": yaml_template, "dataset_name": lang, "task": f"mgsm_{lang}_direct", - "doc_to_text": f"""{{% if answer is not none %}}""" \ - f"""{{{{question+"\\n{ANSWER}"}}}}""" \ - f"""{{% else %}}""" \ - f"""{{{{"{QUESTION} "+question+"\\n{ANSWER}"}}}}""" \ - f"""{{% endif %}}""", - "doc_to_target": f"""{{% if answer is not none %}}""" \ - f"""{{{{answer[{len(ANSWER)}+1]}}}}""" \ - f"""{{% else %}}""" \ - f"""{{{{answer_number|string}}}}""" \ - f"""{{% endif %}}""", - **filter_list + "doc_to_text": f"""{{% if answer is not none %}}""" + f"""{{{{question+"\\n{ANSWER}"}}}}""" + f"""{{% else %}}""" + f"""{{{{"{QUESTION} "+question+"\\n{ANSWER}"}}}}""" + f"""{{% endif %}}""", + "doc_to_target": f"""{{% if answer is not none %}}""" + f"""{{{{answer[{len(ANSWER)}+1]}}}}""" + f"""{{% else %}}""" + f"""{{{{answer_number|string}}}}""" + f"""{{% endif %}}""", + **filter_list, }, f, allow_unicode=True, @@ -170,7 +171,10 @@ def main() -> None: "--output-dir", default=".", help="Directory to write yaml files to" ) parser.add_argument( - "--mode", default="native-cot", choices=["direct", "native-cot", "en-cot"], help="Mode of chain-of-thought" + "--mode", + default="native-cot", + choices=["direct", "native-cot", "en-cot"], + help="Mode of chain-of-thought", ) args = parser.parse_args() -- GitLab From 9f4682a33c330a8d1e53e6efdfd55bc4bfd0c39e Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 16 Aug 2023 16:14:14 +0000 Subject: [PATCH 022/264] fixed typos --- lm_eval/tasks/mgsm/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py index 05071523..3bffd705 100644 --- a/lm_eval/tasks/mgsm/utils.py +++ 
b/lm_eval/tasks/mgsm/utils.py @@ -116,12 +116,12 @@ def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: ANSWER = LANGUAGES[lang]["ANSWER"] REGEX = LANGUAGES[lang]["REGEX"] task_name = f"mgsm_{lang}_native-cot" - elif model == "en-cot": + elif mode == "en-cot": ANSWER = LANGUAGES["en"]["ANSWER"] REGEX = LANGUAGES["en"]["REGEX"] task_name = f"mgsm_{lang}_en-cot" - file_name = f"{file_name}.yaml" + file_name = f"{task_name}.yaml" filter_list = add_regex_pattern(REGEX) with open( -- GitLab From 603eced6302a5ac1803a0abf76ede2ed7e3f2851 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 17 Aug 2023 00:33:21 +0000 Subject: [PATCH 023/264] update readme on translation tasks --- lm_eval/tasks/translation/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/translation/README.md b/lm_eval/tasks/translation/README.md index 7ce7046b..bd363026 100644 --- a/lm_eval/tasks/translation/README.md +++ b/lm_eval/tasks/translation/README.md @@ -18,7 +18,7 @@ * `wmt14` * `wmt16` * `wmt20` -* `iwslt17` +* `iwslt2017` #### Tasks -- GitLab From 2e2f28a5f3031753f3b5ed6e3fd51165b83ec02a Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 17 Aug 2023 01:22:29 +0000 Subject: [PATCH 024/264] add XX->en direction to translation tasks --- .../tasks/translation/iwslt2017_ar-en.yaml | 2 +- lm_eval/tasks/translation/utils.py | 83 ++++++++++--------- lm_eval/tasks/translation/wmt14_en-fr.yaml | 14 ++++ lm_eval/tasks/translation/wmt16_en-de.yaml | 14 ++++ lm_eval/tasks/translation/wmt16_en-ro.yaml | 14 ++++ 5 files changed, 86 insertions(+), 41 deletions(-) create mode 100644 lm_eval/tasks/translation/wmt14_en-fr.yaml create mode 100644 lm_eval/tasks/translation/wmt16_en-de.yaml create mode 100644 lm_eval/tasks/translation/wmt16_en-ro.yaml diff --git a/lm_eval/tasks/translation/iwslt2017_ar-en.yaml b/lm_eval/tasks/translation/iwslt2017_ar-en.yaml index 6f8b2e53..739fb6c2 100644 --- a/lm_eval/tasks/translation/iwslt2017_ar-en.yaml +++ b/lm_eval/tasks/translation/iwslt2017_ar-en.yaml @@ -1,5 +1,5 @@ # Generated by utils.py -dataset_name: iwslt2017-ar-en +dataset_name: iwslt2017-en-ar dataset_path: iwslt2017 doc_to_target: ' {{translation["en"]}}' doc_to_text: 'Arabic phrase: {{translation["ar"]}} diff --git a/lm_eval/tasks/translation/utils.py b/lm_eval/tasks/translation/utils.py index 68a1a834..0ed26c2c 100644 --- a/lm_eval/tasks/translation/utils.py +++ b/lm_eval/tasks/translation/utils.py @@ -30,7 +30,7 @@ gpt3_translation_benchmarks = { LANGUAGES = { **gpt3_translation_benchmarks, # "wmt20": sacrebleu.get_langpairs_for_testset("wmt20"), - "iwslt2017": ["en-ar", "ar-en"], # Arabic + "iwslt2017": ["en-ar"], # Arabic } @@ -49,45 +49,48 @@ def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: """ err = [] for lang in LANGUAGES.keys(): - for lang_pair in LANGUAGES[lang]: - file_name = f"{lang}_{lang_pair}.yaml" - try: - src_lang, _, tgt_lang = lang_pair.partition("-") - source, target = code_to_language(src_lang), code_to_language(tgt_lang) - - groups = ["greedy_until", "translation", lang] - if lang in gpt3_translation_benchmarks.keys(): - groups += ["gpt3_translation_benchmarks"] - - with open( - f"{output_dir}/{file_name}", - "w" if overwrite else "x", - encoding="utf8", - ) as f: - f.write("# Generated by utils.py\n") - yaml.dump( - { - "include": "wmt_common_yaml", - "group": groups, - "dataset_path": lang, - "dataset_name": lang_pair - if not (lang == "iwslt2017") - else "iwslt2017-" + lang_pair, - "task": f"{lang}-{lang_pair}", - 
"doc_to_text": f"{source} phrase: " - + "{{translation[" - + f'"{src_lang}"' - + "]}}\n" - + f"{target} phrase:", - "doc_to_target": " {{" - + "translation[" - + f'"{tgt_lang}"]' - + "}}", - }, - f, - ) - except FileExistsError: - err.append(file_name) + for dataset_name in LANGUAGES[lang]: + src_lang, _, tgt_lang = dataset_name.partition("-") + for src, tgt in [[src_lang, tgt_lang], [tgt_lang, src_lang]]: + # both translation directions for each lang pair + lang_pair = src + "-" + tgt + file_name = f"{lang}_{lang_pair}.yaml" + try: + source, target = code_to_language(src), code_to_language(tgt) + + groups = ["greedy_until", "translation", lang] + if lang in gpt3_translation_benchmarks.keys(): + groups += ["gpt3_translation_benchmarks"] + + with open( + f"{output_dir}/{file_name}", + "w" if overwrite else "x", + encoding="utf8", + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "wmt_common_yaml", + "group": groups, + "dataset_path": lang, + "dataset_name": dataset_name + if not (lang == "iwslt2017") + else "iwslt2017-" + dataset_name, + "task": f"{lang}-{lang_pair}", + "doc_to_text": f"{source} phrase: " + + "{{translation[" + + f'"{src}"' + + "]}}\n" + + f"{target} phrase:", + "doc_to_target": " {{" + + "translation[" + + f'"{tgt}"]' + + "}}", + }, + f, + ) + except FileExistsError: + err.append(file_name) if len(err) > 0: raise FileExistsError( diff --git a/lm_eval/tasks/translation/wmt14_en-fr.yaml b/lm_eval/tasks/translation/wmt14_en-fr.yaml new file mode 100644 index 00000000..154b0698 --- /dev/null +++ b/lm_eval/tasks/translation/wmt14_en-fr.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["fr"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + French phrase:' +group: +- greedy_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-en-fr diff --git a/lm_eval/tasks/translation/wmt16_en-de.yaml b/lm_eval/tasks/translation/wmt16_en-de.yaml new file mode 100644 index 00000000..e7ac0d77 --- /dev/null +++ b/lm_eval/tasks/translation/wmt16_en-de.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' {{translation["de"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + German phrase:' +group: +- greedy_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-de diff --git a/lm_eval/tasks/translation/wmt16_en-ro.yaml b/lm_eval/tasks/translation/wmt16_en-ro.yaml new file mode 100644 index 00000000..c214b56c --- /dev/null +++ b/lm_eval/tasks/translation/wmt16_en-ro.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["ro"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Romanian phrase:' +group: +- greedy_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-ro -- GitLab From b5ce475d23a8ed56db8810806c4a760eddcf229a Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 17 Aug 2023 01:22:46 +0000 Subject: [PATCH 025/264] re-enable MultiChoice on --tasks --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 169ef466..1c2e6f6f 100644 --- a/main.py +++ b/main.py @@ -24,7 +24,7 @@ def parse_args(): help="String arguments for model, e.g. 
`pretrained=EleutherAI/pythia-160m,dtype=float32`", ) parser.add_argument( - "--tasks", default=None # , choices=utils.MultiChoice(sorted(ALL_TASKS)) + "--tasks", default=None, choices=utils.MultiChoice(sorted(ALL_TASKS)) ) parser.add_argument( "--num_fewshot", -- GitLab From 4156a0051e8a7fdb6ad7b8fb043a15ca8353ae98 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 17 Aug 2023 01:23:05 +0000 Subject: [PATCH 026/264] improve --write_out --- lm_eval/evaluator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index c05926b1..23fc4aab 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -252,7 +252,8 @@ def evaluate( # print the prompt for the first few documents if inst.doc_id < 1: eval_logger.info( - f"Task: {task_name}; document {inst.doc_id}; context prompt (starting on next line):\n{inst.args[0]}\n(end of prompt on previous line)" + f"Task: {task_name}; document {inst.doc_id}; context prompt (starting on next line):\ +\n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)" ) eval_logger.info(f"Request: {str(inst)}") -- GitLab From 8436e11f5dcebbd17f96abfe41d575bb838ceee4 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 22 Aug 2023 01:59:30 +0000 Subject: [PATCH 027/264] initial commit for nq_open --- lm_eval/tasks/nq_open/README.md | 0 lm_eval/tasks/nq_open/nq_open.yaml | 29 +++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 lm_eval/tasks/nq_open/README.md create mode 100644 lm_eval/tasks/nq_open/nq_open.yaml diff --git a/lm_eval/tasks/nq_open/README.md b/lm_eval/tasks/nq_open/README.md new file mode 100644 index 00000000..e69de29b diff --git a/lm_eval/tasks/nq_open/nq_open.yaml b/lm_eval/tasks/nq_open/nq_open.yaml new file mode 100644 index 00000000..31d7288a --- /dev/null +++ b/lm_eval/tasks/nq_open/nq_open.yaml @@ -0,0 +1,29 @@ +task: nq_open +dataset_path: nq_open +output_type: greedy_until +training_split: train +validation_split: validation +description: "Answer these questions:\n\n" +doc_to_text: "Q: {{question}}?\nA:" +doc_to_target: "{{answer}}" # TODO: should be multi-target +generation_kwargs: + until: + - "\n" + - "." 
+ - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - "\ban|a|the\b" -- GitLab From b1d468f25f0041dca2664345ac82fc0d56c518d1 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 22 Aug 2023 02:02:29 +0000 Subject: [PATCH 028/264] fixes to multiple_target --- lm_eval/api/task.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 50c53764..9840c006 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -669,9 +669,9 @@ class ConfigurableTask(Task): check_choices = test_target for choice in check_choices: - choice_has_whitespace = True if " " in choice else False + choice_has_whitespace = True if choice.startswith(" ") else False delimiter_has_whitespace = ( - True if " " in self._config.target_delimiter else False + True if self._config.target_delimiter.startswith(" ") else False ) if delimiter_has_whitespace and choice_has_whitespace: @@ -1050,6 +1050,10 @@ class ConfigurableTask(Task): # return true if any are true # TODO: this may break for multipLe_target, non zero-or-1 metrics scores = [] + if not isinstance(gold, list): + # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer + # print(gold) + gold = [gold] for gold_option in gold: res = self._metric_fn_list[key]( references=[gold_option], -- GitLab From eaa7afe2597a1fd0c7f07fceb049475fc9dd6f0d Mon Sep 17 00:00:00 2001 From: Hailey Schoelkopf <65563625+haileyschoelkopf@users.noreply.github.com> Date: Tue, 22 Aug 2023 00:39:26 -0400 Subject: [PATCH 029/264] Create user_guide.md --- docs/user_guide.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 docs/user_guide.md diff --git a/docs/user_guide.md b/docs/user_guide.md new file mode 100644 index 00000000..86cc5df0 --- /dev/null +++ b/docs/user_guide.md @@ -0,0 +1,9 @@ +# User Guide + + +## Command-line Interface + + +## External Library Usage + +* We support 2 ways of calling the library, for use within training loops or other scripts. 
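To make the in-script route concrete, a hedged sketch of programmatic usage (the `simple_evaluate` function and the `model_args` string come from `lm_eval/evaluator.py` and the CLI flags shown earlier in this series; the exact keyword signature may differ between versions):

```python
# Sketch of calling the evaluator from a training loop or other script.
# `model="hf"` is an assumed backend registry name; check lm_eval.models
# for the names registered in your version.
from lm_eval.evaluator import simple_evaluate

results = simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m,dtype=float32",
    tasks=["nq_open", "mgsm_en_direct"],
    num_fewshot=0,
)
print(results["results"])  # per-task metrics, e.g. exact_match
```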
-- GitLab From f96f5fad4ced4b23afa7a5071946ad9637147412 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 22 Aug 2023 10:17:51 +0000 Subject: [PATCH 030/264] add endspace --- ignore.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignore.txt b/ignore.txt index 5f204967..cde618d0 100644 --- a/ignore.txt +++ b/ignore.txt @@ -4,4 +4,4 @@ nin maka mor te -ond \ No newline at end of file +ond -- GitLab From d4e075e3f672b89b83fc39bb51507795ff02b7f4 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 22 Aug 2023 15:22:24 +0000 Subject: [PATCH 031/264] added truncation option --- lm_eval/models/huggingface.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index af6ba37c..5b77d151 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -69,6 +69,7 @@ class HFLM(LM): revision: Optional[str] = "main", subfolder: Optional[str] = None, tokenizer: Optional[str] = None, + truncation: Optional[bool] = False, max_length: Optional[int] = None, device: Optional[str] = "cuda", dtype: Optional[Union[str, torch.dtype]] = "auto", @@ -243,6 +244,8 @@ class HFLM(LM): use_fast=use_fast_tokenizer, ) + self.truncation = truncation + self.vocab_size = self.tokenizer.vocab_size self.tokenizer.pad_token_id = self.tokenizer.eos_token_id @@ -422,7 +425,7 @@ class HFLM(LM): return encoding def tok_batch_encode( - self, strings: List[str], padding_side="left", left_truncate_len=None + self, strings: List[str], padding_side="left", left_truncate_len=None, truncation=False ): # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. old_padding_side = self.tokenizer.padding_side @@ -435,6 +438,7 @@ class HFLM(LM): encoding = self.tokenizer( strings, + truncation=truncation, padding="longest", return_tensors="pt", add_special_tokens=add_special_tokens, @@ -859,7 +863,7 @@ class HFLM(LM): # encode, pad, and truncate contexts for this batch context_enc, attn_masks = self.tok_batch_encode( - contexts, left_truncate_len=max_ctx_len + contexts, left_truncate_len=max_ctx_len, truncation=self.truncation, ) context_enc = context_enc.to(self.device) attn_masks = attn_masks.to(self.device) -- GitLab From f7a0235d8467bcf6fe1402f2b64f0c499dea2f28 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 22 Aug 2023 15:22:46 +0000 Subject: [PATCH 032/264] update yaml --- lm_eval/tasks/super_glue/multirc/t5-prompt.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml index e7a286fc..282b0668 100644 --- a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml @@ -9,9 +9,12 @@ output_type: greedy_until doc_to_text: "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}" doc_to_target: label doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.5 metric_list: - - metric: exact_match - aggregation: mean + - metric: !function t5_utils.f1 + aggregation: !function t5_utils.agg_f1 higher_is_better: true - ignore_case: true - ignore_punctuation: true -- GitLab From 80d88899f9b06e7d6108900ea40833598f239189 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 22 Aug 2023 16:47:54 +0000 Subject: [PATCH 033/264] fix test_choice test --- lm_eval/api/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/lm_eval/api/task.py b/lm_eval/api/task.py index e18cacc3..ded7f5c4 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -659,7 +659,7 @@ class ConfigurableTask(Task): self.multiple_target = len(test_target) else: if (type(test_target) is int) and (test_choice is not None): - test_target = [self.doc_to_choice(test_target)[test_target]] + test_target = [test_choice[test_target]] else: test_target = [test_target] -- GitLab From 4a768feb23e8534896fe291d2f8f0e81b7a32ad0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 22 Aug 2023 17:13:12 +0000 Subject: [PATCH 034/264] add cb metrics --- lm_eval/tasks/super_glue/cb/t5_utils.py | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 lm_eval/tasks/super_glue/cb/t5_utils.py diff --git a/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm_eval/tasks/super_glue/cb/t5_utils.py new file mode 100644 index 00000000..82471538 --- /dev/null +++ b/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -0,0 +1,31 @@ +import sklearn.metrics + +def mean_3class_f1(predictions, references): # This is a passthrough function + + string_label = ['entailment', 'contradiction', 'neutral'] + predictions = string_label.index(predictions[0]) + references = string_label.index(references[0]) + + return (predictions, references) + +def agg_mean_3class_f1(items): + + predictions, references = zip(*items) + + """Computes the unweighted average of the F1 per class.""" + metric_str = "fbeta_score" + metric_post_process_fn = lambda x: x + metric_fn_kwargs = { + "beta": 1, + "labels": range(3), + "average": "macro", + } + + def _fn(predictions, references): + metric_fn = getattr(sklearn.metrics, metric_str) + metric_val = metric_fn(references, predictions, **metric_fn_kwargs) + if metric_post_process_fn is not None: + metric_val = metric_post_process_fn(metric_val) + return metric_val + + return _fn(predictions, references) -- GitLab From 8c93320ae03518b74730417b6d895e7ccb916143 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 22 Aug 2023 17:13:24 +0000 Subject: [PATCH 035/264] add multirc metrics --- .../tasks/super_glue/multirc/t5-prompt.yaml | 5 +- lm_eval/tasks/super_glue/multirc/t5_utils.py | 47 +++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 lm_eval/tasks/super_glue/multirc/t5_utils.py diff --git a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml index 282b0668..d0a62652 100644 --- a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml @@ -8,7 +8,7 @@ validation_split: validation output_type: greedy_until doc_to_text: "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}" doc_to_target: label -doc_to_choice: ['False', 'True'] +doc_to_choice: "{% set group_id = idx.question|string %}{{[group_id+'_False', group_id+'_True']}}" generation_kwargs: until: - "" @@ -18,3 +18,6 @@ metric_list: - metric: !function t5_utils.f1 aggregation: !function t5_utils.agg_f1 higher_is_better: true + - metric: !function t5_utils.em + aggregation: !function t5_utils.agg_em + higher_is_better: true diff --git a/lm_eval/tasks/super_glue/multirc/t5_utils.py b/lm_eval/tasks/super_glue/multirc/t5_utils.py new file mode 100644 index 00000000..29aaa1e1 --- /dev/null +++ b/lm_eval/tasks/super_glue/multirc/t5_utils.py @@ -0,0 +1,47 @@ +import collections + +import numpy as np +import sklearn.metrics + +def f1(predictions, references): # This is a passthrough function + + _prediction = predictions[0] + 
_reference = references[0].split("_")[-1] + string_label = ['False', 'True'] + reference = string_label.index(_reference) + prediction = string_label.index(_prediction) if _prediction in string_label else not bool(reference) + + return (prediction, reference) + +def agg_f1(items): + + predictions, references = zip(*items) + references, predictions = np.asarray(references), np.asarray(predictions) + + return sklearn.metrics.f1_score(references, predictions) + + +def em(predictions, references): # This is a passthrough function + + _prediction = predictions[0] + _group, _reference = references[0].split("_") + string_label = ['False', 'True'] + reference = string_label.index(_reference) + prediction = string_label.index(_prediction) if _prediction in string_label else not bool(reference) + + return (_group, prediction, reference) + + +def agg_em(items): + + grouped_values = collections.defaultdict(lambda: ([], [])) + for group, prediction, reference in items: + grouped_values[group][0].append(reference) + grouped_values[group][1].append(prediction) + + group_scores = [] + for group, (targets, predictions) in grouped_values.items(): + score = float(np.array_equal(targets, predictions)) + group_scores.append(score) + + return np.mean(group_scores) \ No newline at end of file -- GitLab From 18f7dfb3c5548c2b65124e8095802b9ecc890113 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 23 Aug 2023 14:21:19 +0000 Subject: [PATCH 036/264] temp --- lm_eval/tasks/super_glue/record/t5-prompt.yaml | 1 + lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lm_eval/tasks/super_glue/record/t5-prompt.yaml b/lm_eval/tasks/super_glue/record/t5-prompt.yaml index e674fed0..ba811d4d 100644 --- a/lm_eval/tasks/super_glue/record/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/record/t5-prompt.yaml @@ -6,6 +6,7 @@ dataset_name: record training_split: train validation_split: validation output_type: greedy_until +process_doc: !function t5_utils.process_doc doc_to_text: "record query: {{query}} entities: {{entities|join(\", \")}} passage: {{passage}}" doc_to_target: "{{answers}}" metric_list: diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml index bf9920d0..2fe33430 100644 --- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -7,7 +7,7 @@ training_split: train validation_split: validation output_type: greedy_until doc_to_text: !function "t5_utils.t5_prompt_doc_to_text" -doc_to_target: span1_text +doc_to_target: "{{[span1_text, span2_text][label]}}" metric_list: - metric: exact_match aggregation: mean -- GitLab From 691b4ca979a3bc7581d27c52650116258138e7b4 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 23 Aug 2023 15:13:45 +0000 Subject: [PATCH 037/264] temp --- lm_eval/tasks/super_glue/record/t5-prompt.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/super_glue/record/t5-prompt.yaml b/lm_eval/tasks/super_glue/record/t5-prompt.yaml index ba811d4d..c33a026c 100644 --- a/lm_eval/tasks/super_glue/record/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/record/t5-prompt.yaml @@ -6,8 +6,8 @@ dataset_name: record training_split: train validation_split: validation output_type: greedy_until -process_doc: !function t5_utils.process_doc -doc_to_text: "record query: {{query}} entities: {{entities|join(\", \")}} passage: {{passage}}" +process_docs: !function t5_utils.process_docs +doc_to_text: !function t5_utils.doc_to_text 
doc_to_target: "{{answers}}" metric_list: - metric: exact_match -- GitLab From 8b1e652f3d38d580d0961dbd9b47c71a539b826f Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Thu, 24 Aug 2023 02:27:24 +0100 Subject: [PATCH 038/264] added multiple python check --- .github/workflows/unit_tests.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index c1c9f313..e734a565 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -26,7 +26,8 @@ jobs: uses: actions/setup-python@v4 with: python-version: 3.9 - cache: 'pip' + cache: pip + cache-dependency-path: setup.py - name: Install dependencies run: pip install -e '.[linting,testing]' --extra-index-url https://download.pytorch.org/whl/cpu - name: Pre-Commit @@ -46,16 +47,20 @@ jobs: testcpu: name: CPU Tests runs-on: ubuntu-latest + strategy: + matrix: + python-version: [ "3.8", "3.9", "3.10", "3.11" ] timeout-minutes: 20 steps: - name: Checkout Code uses: actions/checkout@v3 - - name: Set up Python 3.9 + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: - python-version: 3.9 - cache: 'pip' + python-version: ${{ matrix.python-version }} + cache: pip + cache-dependency-path: setup.py - name: Install dependencies run: | python -m pip install --upgrade pip -- GitLab From 71ab0f2cbed0663e850a961a4352f5a58c7188d3 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Thu, 24 Aug 2023 02:38:36 +0100 Subject: [PATCH 039/264] added setup caching --- .github/workflows/new_tasks.yml | 1 + .github/workflows/unit_tests.yml | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml index 0fdbdb63..a14e251c 100644 --- a/.github/workflows/new_tasks.yml +++ b/.github/workflows/new_tasks.yml @@ -51,6 +51,7 @@ jobs: with: python-version: 3.9 cache: 'pip' + cache-dependency-path: setup.py - name: Install dependencies if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' run: | diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index e734a565..d5da1a55 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -49,8 +49,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [ "3.8", "3.9", "3.10", "3.11" ] - timeout-minutes: 20 + python-version: [ "3.9", "3.10", "3.11" ] + timeout-minutes: 30 steps: - name: Checkout Code @@ -65,7 +65,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -e '.[testing,anthropic,sentencepiece]' --extra-index-url https://download.pytorch.org/whl/cpu - # Install optional git dependencies +# Install optional git dependencies # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Test with pytest -- GitLab From cb4858837505dfead1e096cc899afabe08722114 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 24 Aug 2023 14:00:21 +0000 Subject: [PATCH 040/264] filter takes docs as argument in case filtering requires it --- lm_eval/api/filter.py | 8 ++++---- lm_eval/api/task.py | 17 +++++++++++++---- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/lm_eval/api/filter.py b/lm_eval/api/filter.py index 393623a7..cc74634f 
100644 --- a/lm_eval/api/filter.py +++ b/lm_eval/api/filter.py @@ -2,7 +2,7 @@ from dataclasses import dataclass from typing import List from lm_eval.api.instance import Instance - +from datasets import Dataset class Filter: """ @@ -18,7 +18,7 @@ class Filter: Can define custom behavior here, if an individual instantiation of a Filter class should have state. """ - def apply(self, resps): + def apply(self, resps, docs): """ Defines the operation to perform on a list of the `inst.resps` properties of `Instance` objects. Should return the list of (filtered) response lists *in the same order as they were input*, e.g. @@ -40,14 +40,14 @@ class FilterEnsemble: name: str filters: List[Filter] - def apply(self, instances: List[Instance]): + def apply(self, instances: List[Instance], docs: List[Dataset]): resps = [ inst.resps for inst in instances ] # operate just on the model responses for f in self.filters: # apply filters in sequence - resps = f.apply(resps) + resps = f.apply(resps, docs) # add the end results after filtering to filtered_requests of their respective source instances. # has key `self.name`: each FilterEnsemble applied in a given run should use a different name. diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index e6d8d96d..cae48368 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -627,19 +627,19 @@ class ConfigurableTask(Task): ) if self.has_test_docs(): - docs = self.test_docs() + self.task_docs = self.test_docs() elif self.has_validation_docs(): - docs = self.validation_docs() + self.task_docs = self.validation_docs() else: assert ( False ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!" # Test One Doc - self.features = list(docs.features.keys()) + self.features = list(self.task_docs.features.keys()) self.multiple_input = 0 self.multiple_target = 0 - test_doc = docs[0] + test_doc = self.task_docs[0] test_text = self.doc_to_text(test_doc) test_target = self.doc_to_target(test_doc) @@ -743,6 +743,15 @@ class ConfigurableTask(Task): ) return super().fewshot_docs() + def apply_filters(self): + + if hasattr(self, "_filters"): + for f in self._filters: + f.apply(self._instances, self.task_docs) + else: + eval_logger.warning("No filter defined, passing through instances") + return self._instances + def should_decontaminate(self): return self._config.should_decontaminate -- GitLab From 007d485b6c84b0bb9635ba81e29b3d7c42a9ab55 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 24 Aug 2023 14:00:48 +0000 Subject: [PATCH 041/264] modified apply method to accept docs --- lm_eval/filters/__init__.py | 6 ++++-- lm_eval/filters/decontamination.py | 2 +- lm_eval/filters/extraction.py | 4 ++-- lm_eval/filters/selection.py | 4 ++-- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lm_eval/filters/__init__.py b/lm_eval/filters/__init__.py index cdc0d159..54578cb4 100644 --- a/lm_eval/filters/__init__.py +++ b/lm_eval/filters/__init__.py @@ -17,14 +17,16 @@ FILTER_REGISTRY = { def get_filter(filter_name): - return FILTER_REGISTRY[filter_name] + if filter_name in FILTER_REGISTRY: + return FILTER_REGISTRY[filter_name] + else: + return filter_name def build_filter_ensemble(filter_name, components): """ Create a filtering pipeline. 
""" - filters = [] for (function, kwargs) in components: if kwargs is None: diff --git a/lm_eval/filters/decontamination.py b/lm_eval/filters/decontamination.py index d95db3bb..ac1e29ac 100644 --- a/lm_eval/filters/decontamination.py +++ b/lm_eval/filters/decontamination.py @@ -17,7 +17,7 @@ class DecontaminationFilter(Filter): """ self._decontam_results = None - def apply(self, reps): + def apply(self, reps, docs): """ Return {"no_contamination", "only_contamination"} keys for the 2 different subsets """ diff --git a/lm_eval/filters/extraction.py b/lm_eval/filters/extraction.py index 1eefc2f6..b308a167 100644 --- a/lm_eval/filters/extraction.py +++ b/lm_eval/filters/extraction.py @@ -15,7 +15,7 @@ class RegexFilter(Filter): self.regex = re.compile(regex_pattern) self.fallback = fallback - def apply(self, resps): + def apply(self, resps, docs): # here, we assume we have a list, in which each element is # a list of model responses for some particular input/target pair. # so we process each of these (same input/target response sets) @@ -44,7 +44,7 @@ class WhitespaceFilter(Filter): def __init__(self): pass - def apply(self, resps): + def apply(self, resps, docs): def filter_set(inst): filtered_resp = [] diff --git a/lm_eval/filters/selection.py b/lm_eval/filters/selection.py index ef8ff5f4..d5c412f9 100644 --- a/lm_eval/filters/selection.py +++ b/lm_eval/filters/selection.py @@ -23,7 +23,7 @@ class TakeKFilter(Filter): super().__init__(*args, **kwargs) - def apply(self, resps): + def apply(self, resps, docs): # check we have at least k responses per doc, else we can't take the first k assert ( len(resps[0]) >= self.k @@ -37,7 +37,7 @@ class MajorityVoteFilter(Filter): Can define custom behavior here, if an individual instantiation of a Filter class should have state. """ - def apply(self, resps): + def apply(self, resps, docs): """ Each entry of `resps` is a list of model responses. We select the response that occurs most frequently in each entry of `resps`. 
-- GitLab From faa2e43d7a16d2de5b321855f00ba741d69ee417 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 24 Aug 2023 15:27:00 +0000 Subject: [PATCH 042/264] add wsc utils --- lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 14 ++-- lm_eval/tasks/super_glue/wsc/t5_utils.py | 88 +++++++++++++++++++++ 2 files changed, 96 insertions(+), 6 deletions(-) create mode 100644 lm_eval/tasks/super_glue/wsc/t5_utils.py diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml index 2fe33430..6c443257 100644 --- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -2,15 +2,17 @@ group: - super-glue-t5-prompt task: super_glue-wsc-t5-prompt dataset_path: super_glue -dataset_name: wsc +dataset_name: wsc.fixed training_split: train validation_split: validation output_type: greedy_until -doc_to_text: !function "t5_utils.t5_prompt_doc_to_text" -doc_to_target: "{{[span1_text, span2_text][label]}}" +doc_to_text: !function "t5_utils.doc_to_text" +doc_to_target: label metric_list: - - metric: exact_match + - metric: accuracy aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true +filter_list: + - name: "wsc_postprocessor" + filter: + - function: !function t5_utils.WSCPostprocess \ No newline at end of file diff --git a/lm_eval/tasks/super_glue/wsc/t5_utils.py b/lm_eval/tasks/super_glue/wsc/t5_utils.py new file mode 100644 index 00000000..801ad6e5 --- /dev/null +++ b/lm_eval/tasks/super_glue/wsc/t5_utils.py @@ -0,0 +1,88 @@ +import re +from lm_eval.api.filter import Filter + +def doc_to_text(x): + text = re.sub(r" X ", " *"+x["span2_text"]+"* ", _wsc_inputs(x)) + return "wsc: "+text + + +def _wsc_inputs(x): + + words = x['text'].split(" ") + + # We would need some special logic to handle the case where the pronoun is the + # first or last word in the text. None of the examples in WSC seem to have + # this, so we are ignoring these cases. + assert x['span2_index'] > 0 + assert x['span2_index'] < len(words) + pronoun_index = x['span2_index'] + + def create_input(): + assert words[pronoun_index] == x['span2_text'] + + return " ".join([ + " ".join( + words[:pronoun_index] + ), + 'X', + " ".join( + words[pronoun_index + 1:] + ), + ]) + + # Handle some special cases. + if x['text'] == 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. \"Good for him,\" he said. ': + return ( + 'The boy continued to whip the pony , and eventually the pony threw ' + 'him over. John laughed out quite loud. "Good for X ," he said.' + ) + + # Using the span2_index, we get 'use' instead of 'it'. + if x['text'] == 'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?': + return ( + 'When they had eventually calmed down a bit , and had gotten home, ' + 'Mr. Farley put the magic pebble in an iron safe . Some day they might ' + 'want to use X , but really for now, what more could they wish for?' 
+        )
+
+    return create_input()
+
+
+class WSCPostprocess(Filter):
+
+    def __init__(self, **kwargs):
+
+        self.determiners = {
+            "a", "an", "few", "her", "his", "each", "every", "many", "much", "my",
+            "our", "some", "that", "the", "their", "these", "this", "those", "which",
+            "whose", "your"
+        }
+
+    def clean(self, s):
+        """Ignore capitalization and determiners."""
+        s = s.strip().lower()
+        return " ".join([w for w in s.split(" ") if w not in self.determiners])
+
+    def apply(self, resps, docs):
+
+        filtered_resps = []
+        for prediction, reference in zip(*(resps, docs["span1_text"])):
+
+            prediction = self.clean(prediction[0])
+            reference = self.clean(reference)
+
+            if ("'" in prediction) != ("'" in reference):
+                # Make sure we don't mark cases where the prediction is "Bob" and the
+                # referent is "Bob's hat" as predicting the referent.
+                predicted_referent = False
+            else:
+                prediction_words = set(prediction.split(" "))
+                referent_words = set(reference.split(" "))
+
+                # Handle cases where the prediction is "fuzzy bunny" and the referent is
+                # "bunny".
+                predicted_referent = prediction_words.issubset(
+                    referent_words) or referent_words.issubset(prediction_words)
+
+            filtered_resps.append(predicted_referent)
+
+        return filtered_resps
-- 
GitLab


From 599205c478357e9ef919d8aa85a7799a2df2a581 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Thu, 24 Aug 2023 17:02:21 +0000
Subject: [PATCH 043/264] edit to accept docs

---
 lm_eval/filters/selection.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lm_eval/filters/selection.py b/lm_eval/filters/selection.py
index d5c412f9..05d8c7e7 100644
--- a/lm_eval/filters/selection.py
+++ b/lm_eval/filters/selection.py
@@ -9,7 +9,7 @@ class TakeFirstFilter(Filter):
         Can define custom behavior here, if an individual instantiation of a Filter class should have state.
         """

-    def apply(self, resps):
+    def apply(self, resps, docs):
         """
         Assuming each entry of `resps` is a list of model responses, we discard all but the first response.
""" -- GitLab From adbbfb44d33a363a2a2a09d37744c646c4ba5c0e Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 24 Aug 2023 17:02:38 +0000 Subject: [PATCH 044/264] adding record processing --- .../tasks/super_glue/record/t5-prompt.yaml | 12 +- lm_eval/tasks/super_glue/record/t5_utils.py | 159 ++++++++++++++++++ 2 files changed, 165 insertions(+), 6 deletions(-) create mode 100644 lm_eval/tasks/super_glue/record/t5_utils.py diff --git a/lm_eval/tasks/super_glue/record/t5-prompt.yaml b/lm_eval/tasks/super_glue/record/t5-prompt.yaml index c33a026c..82dcc383 100644 --- a/lm_eval/tasks/super_glue/record/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/record/t5-prompt.yaml @@ -3,15 +3,15 @@ group: task: super_glue-record-t5-prompt dataset_path: super_glue dataset_name: record -training_split: train validation_split: validation output_type: greedy_until process_docs: !function t5_utils.process_docs doc_to_text: !function t5_utils.doc_to_text -doc_to_target: "{{answers}}" +doc_to_target: "{{idx.passage|string}}+{{idx.query}}_{{answers}}" metric_list: - - metric: exact_match - aggregation: mean + - metric: !function t5_utils.em + aggregation: !function t5_utils.squad_em_agg + higher_is_better: true + - metric: !function t5_utils.f1 + aggregation: !function t5_utils.squad_f1_agg higher_is_better: true - ignore_case: true - ignore_punctuation: true diff --git a/lm_eval/tasks/super_glue/record/t5_utils.py b/lm_eval/tasks/super_glue/record/t5_utils.py new file mode 100644 index 00000000..8431b62f --- /dev/null +++ b/lm_eval/tasks/super_glue/record/t5_utils.py @@ -0,0 +1,159 @@ +import re +import string +import collections +import numpy as np + +from tqdm import tqdm +from datasets import Dataset, concatenate_datasets + +from lm_eval.api.metrics import metric_max_over_ground_truths + + +def doc_to_text(doc): + + passage = doc['passage'] + passage = re.sub(r'(\.|\?|\!|\"|\')\n@highlight\n', r'\1 ', passage) + passage = re.sub(r'\n@highlight\n', '. ', passage) + + return " ".join([ + "record query:", + doc['query'], + "entities:", + ", ".join(doc['entities']), + "passage:", + passage + ]) + + +def process_docs(dataset): + + def split_answers(doc): + split_doc = { + **{k: [] for k in doc.keys()}, + } + answers = doc.pop("answers") + for idx, answer in enumerate(answers): + + for key in split_doc.keys(): + if key in doc: + split_doc[key].append(doc[key]) + + split_doc["answers"].append(answer) + return split_doc + + dataset = dataset.map(split_answers) + new_dataset = {} + for key in dataset.features.keys(): + new_dataset[key] = [x for row in dataset[key] for x in row] + + return Dataset.from_dict(new_dataset) + + +def deduplicate_metric(metric_fn, + group_key: str = "group", + value_key: str = "value"): + """Returns a metric that only considers one example per group. + + Useful for things like ReCoRD where inputs may be replicated during training + to handle multiple labels, but where at eval we only want a single copy of + each example. + + Args: + metric_fn: function, the metric to compute on the unique examples. + group_key: the key for the grouping value in the target dictionary. + value_key: the key for the value in the dictionaries. + + Returns: + A metric function that deduplicated based on the grouping key before + returning a metric. 
+ """ + def _deduplicated_metric(targets, predictions): + """Deduplicate targets and predictions and pass that to the metric fn.""" + processed_groups = set() + deduplicated_targets = [] + deduplicated_predictions = [] + for targ, pred in zip(targets, predictions): + group = targ[group_key] + if group in processed_groups: + continue + processed_groups.add(group) + deduplicated_targets.append(targ[value_key]) + deduplicated_predictions.append(pred[value_key]) + return metric_fn(deduplicated_targets, deduplicated_predictions) + return _deduplicated_metric + + +def normalize_squad(answer): + """Normalization used in official SQuAD evaluation script.""" + def _normalize_answer(text, punc_chars, punc_repl): + """Lower text and remove punctuation, articles and extra whitespace.""" + + def remove_articles(s): + return re.sub(r"\b(a|an|the)\b", " ", s) + + def replace_punctuation(s): + to_replace = set(punc_chars) + return "".join(punc_repl if ch in to_replace else ch for ch in s) + + def white_space_fix(s): + return " ".join(s.split()) + + text = text.lower() + text = replace_punctuation(text) + text = remove_articles(text) + text = white_space_fix(text) + + return text + + return _normalize_answer(answer, punc_chars=string.punctuation, punc_repl="") + +def em(predictions, references): # This is a passthrough function + return (predictions[0], references[0]) + +def f1(predictions, references): # This is a passthrough function + return (predictions[0], references[0]) + +def squad_em_agg(items): + + def _exact_match_score(target, prediction): + return target == prediction + + grouped_values = collections.defaultdict(lambda: ([], [])) + for prediction, reference in items: + group, reference = reference.split("_") + grouped_values[group][0].append(normalize_squad(prediction)) + grouped_values[group][1].append(normalize_squad(reference)) + + print(grouped_values) + import sys; sys.exit() + em = np.mean([ + metric_max_over_ground_truths(_exact_match_score, t, p) + for p, t in zip(predictions, targets) + ]) + return em + +def squad_f1_agg(items): + + def _f1_score(target, prediction): + """Computes token f1 score for a single target and prediction.""" + prediction_tokens = prediction.split() + target_tokens = target.split() + common = (collections.Counter(prediction_tokens) & + collections.Counter(target_tokens)) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(target_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + predictions, targets = zip(*items) + targets = [normalize_squad(t) for t in targets] + predictions = [normalize_squad(p) for p in predictions] + + f1 = np.mean([ + metric_max_over_ground_truths(_f1_score, t, p) + for p, t in zip(predictions, targets) + ]) + return f1 -- GitLab From caac08430e6e610466f7cf40eaf03f3c2bd23ad2 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 04:27:34 +0000 Subject: [PATCH 045/264] reformat --- lm_eval/api/filter.py | 1 + lm_eval/models/huggingface.py | 12 +- lm_eval/tasks/super_glue/boolq/t5-prompt.yaml | 1 - lm_eval/tasks/super_glue/cb/t5_utils.py | 4 +- lm_eval/tasks/super_glue/multirc/t5_utils.py | 20 ++- lm_eval/tasks/super_glue/record/t5_utils.py | 118 +++++++----------- lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 2 +- lm_eval/tasks/super_glue/wsc/t5_utils.py | 79 +++++++----- 8 files changed, 127 insertions(+), 110 deletions(-) diff --git a/lm_eval/api/filter.py b/lm_eval/api/filter.py index 
cc74634f..ecbabe9f 100644 --- a/lm_eval/api/filter.py +++ b/lm_eval/api/filter.py @@ -4,6 +4,7 @@ from typing import List from lm_eval.api.instance import Instance from datasets import Dataset + class Filter: """ Filter classes operate on a per-task level. diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 5b77d151..65479d40 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -78,7 +78,7 @@ class HFLM(LM): low_cpu_mem_usage: Optional[bool] = True, trust_remote_code: Optional[bool] = False, use_fast_tokenizer: Optional[bool] = True, - cache_dir: Optional[Union[str,os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, # arguments used for splitting a model across GPUs naively. # only used if `parallelize=True`. parallelize: Optional[bool] = False, @@ -425,7 +425,11 @@ class HFLM(LM): return encoding def tok_batch_encode( - self, strings: List[str], padding_side="left", left_truncate_len=None, truncation=False + self, + strings: List[str], + padding_side="left", + left_truncate_len=None, + truncation=False, ): # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. old_padding_side = self.tokenizer.padding_side @@ -863,7 +867,9 @@ class HFLM(LM): # encode, pad, and truncate contexts for this batch context_enc, attn_masks = self.tok_batch_encode( - contexts, left_truncate_len=max_ctx_len, truncation=self.truncation, + contexts, + left_truncate_len=max_ctx_len, + truncation=self.truncation, ) context_enc = context_enc.to(self.device) attn_masks = attn_masks.to(self.device) diff --git a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml index 8b216605..bda3e614 100644 --- a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -15,4 +15,3 @@ metric_list: higher_is_better: true ignore_case: true ignore_punctuation: true - diff --git a/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm_eval/tasks/super_glue/cb/t5_utils.py index 82471538..6ac30c52 100644 --- a/lm_eval/tasks/super_glue/cb/t5_utils.py +++ b/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -1,13 +1,15 @@ import sklearn.metrics + def mean_3class_f1(predictions, references): # This is a passthrough function - string_label = ['entailment', 'contradiction', 'neutral'] + string_label = ["entailment", "contradiction", "neutral"] predictions = string_label.index(predictions[0]) references = string_label.index(references[0]) return (predictions, references) + def agg_mean_3class_f1(items): predictions, references = zip(*items) diff --git a/lm_eval/tasks/super_glue/multirc/t5_utils.py b/lm_eval/tasks/super_glue/multirc/t5_utils.py index 29aaa1e1..592507e8 100644 --- a/lm_eval/tasks/super_glue/multirc/t5_utils.py +++ b/lm_eval/tasks/super_glue/multirc/t5_utils.py @@ -3,16 +3,22 @@ import collections import numpy as np import sklearn.metrics + def f1(predictions, references): # This is a passthrough function _prediction = predictions[0] _reference = references[0].split("_")[-1] - string_label = ['False', 'True'] + string_label = ["False", "True"] reference = string_label.index(_reference) - prediction = string_label.index(_prediction) if _prediction in string_label else not bool(reference) + prediction = ( + string_label.index(_prediction) + if _prediction in string_label + else not bool(reference) + ) return (prediction, reference) + def agg_f1(items): predictions, references = zip(*items) @@ -25,9 +31,13 @@ def em(predictions, references): # This is 
a passthrough function _prediction = predictions[0] _group, _reference = references[0].split("_") - string_label = ['False', 'True'] + string_label = ["False", "True"] reference = string_label.index(_reference) - prediction = string_label.index(_prediction) if _prediction in string_label else not bool(reference) + prediction = ( + string_label.index(_prediction) + if _prediction in string_label + else not bool(reference) + ) return (_group, prediction, reference) @@ -44,4 +54,4 @@ def agg_em(items): score = float(np.array_equal(targets, predictions)) group_scores.append(score) - return np.mean(group_scores) \ No newline at end of file + return np.mean(group_scores) diff --git a/lm_eval/tasks/super_glue/record/t5_utils.py b/lm_eval/tasks/super_glue/record/t5_utils.py index 8431b62f..98730cac 100644 --- a/lm_eval/tasks/super_glue/record/t5_utils.py +++ b/lm_eval/tasks/super_glue/record/t5_utils.py @@ -11,22 +11,23 @@ from lm_eval.api.metrics import metric_max_over_ground_truths def doc_to_text(doc): - passage = doc['passage'] - passage = re.sub(r'(\.|\?|\!|\"|\')\n@highlight\n', r'\1 ', passage) - passage = re.sub(r'\n@highlight\n', '. ', passage) - - return " ".join([ - "record query:", - doc['query'], - "entities:", - ", ".join(doc['entities']), - "passage:", - passage - ]) + passage = doc["passage"] + passage = re.sub(r"(\.|\?|\!|\"|\')\n@highlight\n", r"\1 ", passage) + passage = re.sub(r"\n@highlight\n", ". ", passage) + + return " ".join( + [ + "record query:", + doc["query"], + "entities:", + ", ".join(doc["entities"]), + "passage:", + passage, + ] + ) def process_docs(dataset): - def split_answers(doc): split_doc = { **{k: [] for k in doc.keys()}, @@ -37,54 +38,21 @@ def process_docs(dataset): for key in split_doc.keys(): if key in doc: split_doc[key].append(doc[key]) - + split_doc["answers"].append(answer) return split_doc - + dataset = dataset.map(split_answers) new_dataset = {} for key in dataset.features.keys(): new_dataset[key] = [x for row in dataset[key] for x in row] - - return Dataset.from_dict(new_dataset) - -def deduplicate_metric(metric_fn, - group_key: str = "group", - value_key: str = "value"): - """Returns a metric that only considers one example per group. - - Useful for things like ReCoRD where inputs may be replicated during training - to handle multiple labels, but where at eval we only want a single copy of - each example. - - Args: - metric_fn: function, the metric to compute on the unique examples. - group_key: the key for the grouping value in the target dictionary. - value_key: the key for the value in the dictionaries. - - Returns: - A metric function that deduplicated based on the grouping key before - returning a metric. 
- """ - def _deduplicated_metric(targets, predictions): - """Deduplicate targets and predictions and pass that to the metric fn.""" - processed_groups = set() - deduplicated_targets = [] - deduplicated_predictions = [] - for targ, pred in zip(targets, predictions): - group = targ[group_key] - if group in processed_groups: - continue - processed_groups.add(group) - deduplicated_targets.append(targ[value_key]) - deduplicated_predictions.append(pred[value_key]) - return metric_fn(deduplicated_targets, deduplicated_predictions) - return _deduplicated_metric + return Dataset.from_dict(new_dataset) def normalize_squad(answer): """Normalization used in official SQuAD evaluation script.""" + def _normalize_answer(text, punc_chars, punc_repl): """Lower text and remove punctuation, articles and extra whitespace.""" @@ -107,39 +75,43 @@ def normalize_squad(answer): return _normalize_answer(answer, punc_chars=string.punctuation, punc_repl="") + def em(predictions, references): # This is a passthrough function return (predictions[0], references[0]) + def f1(predictions, references): # This is a passthrough function return (predictions[0], references[0]) -def squad_em_agg(items): - def _exact_match_score(target, prediction): +def squad_em_agg(items): + def _exact_match_score(prediction, target): return target == prediction grouped_values = collections.defaultdict(lambda: ([], [])) for prediction, reference in items: group, reference = reference.split("_") + # if group not in grouped_values: grouped_values[group][0].append(normalize_squad(prediction)) grouped_values[group][1].append(normalize_squad(reference)) - print(grouped_values) - import sys; sys.exit() - em = np.mean([ - metric_max_over_ground_truths(_exact_match_score, t, p) - for p, t in zip(predictions, targets) - ]) - return em + em = [] + for group in grouped_values.keys(): + predictions, targets = grouped_values[group] + for p in predictions: + em.append(metric_max_over_ground_truths(_exact_match_score, p, targets)) -def squad_f1_agg(items): + return np.mean(em) - def _f1_score(target, prediction): + +def squad_f1_agg(items): + def _f1_score(prediction, target): """Computes token f1 score for a single target and prediction.""" prediction_tokens = prediction.split() target_tokens = target.split() - common = (collections.Counter(prediction_tokens) & - collections.Counter(target_tokens)) + common = collections.Counter(prediction_tokens) & collections.Counter( + target_tokens + ) num_same = sum(common.values()) if num_same == 0: return 0 @@ -148,12 +120,16 @@ def squad_f1_agg(items): f1 = (2 * precision * recall) / (precision + recall) return f1 - predictions, targets = zip(*items) - targets = [normalize_squad(t) for t in targets] - predictions = [normalize_squad(p) for p in predictions] + grouped_values = collections.defaultdict(lambda: ([], [])) + for prediction, reference in items: + group, reference = reference.split("_") + if group not in grouped_values: + grouped_values[group][0].append(normalize_squad(prediction)) + grouped_values[group][1].append(normalize_squad(reference)) + + f1 = [] + for group in grouped_values.keys(): + p, t = grouped_values[group] + f1.append(metric_max_over_ground_truths(_f1_score, p[0], t)) - f1 = np.mean([ - metric_max_over_ground_truths(_f1_score, t, p) - for p, t in zip(predictions, targets) - ]) - return f1 + return np.mean(f1) diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml index 6c443257..38c73214 100644 --- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml 
+++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -15,4 +15,4 @@ metric_list: filter_list: - name: "wsc_postprocessor" filter: - - function: !function t5_utils.WSCPostprocess \ No newline at end of file + - function: !function t5_utils.WSCPostprocess diff --git a/lm_eval/tasks/super_glue/wsc/t5_utils.py b/lm_eval/tasks/super_glue/wsc/t5_utils.py index 801ad6e5..7e55a52a 100644 --- a/lm_eval/tasks/super_glue/wsc/t5_utils.py +++ b/lm_eval/tasks/super_glue/wsc/t5_utils.py @@ -1,61 +1,83 @@ import re from lm_eval.api.filter import Filter + def doc_to_text(x): - text = re.sub(r" X ", " *"+x["span2_text"]+"* ", _wsc_inputs(x)) - return "wsc: "+text + text = re.sub(r" X ", " *" + x["span2_text"] + "* ", _wsc_inputs(x)) + return "wsc: " + text def _wsc_inputs(x): - words = x['text'].split(" ") + words = x["text"].split(" ") # We would need some special logic to handle the case where the pronoun is the # first or last word in the text. None of the examples in WSC seem to have # this, so we are ignoring these cases. - assert x['span2_index'] > 0 - assert x['span2_index'] < len(words) - pronoun_index = x['span2_index'] + assert x["span2_index"] > 0 + assert x["span2_index"] < len(words) + pronoun_index = x["span2_index"] def create_input(): - assert words[pronoun_index] == x['span2_text'] - - return " ".join([ - " ".join( - words[:pronoun_index] - ), - 'X', - " ".join( - words[pronoun_index + 1:] - ), - ]) + assert words[pronoun_index] == x["span2_text"] + + return " ".join( + [ + " ".join(words[:pronoun_index]), + "X", + " ".join(words[pronoun_index + 1 :]), + ] + ) # Handle some special cases. - if x['text'] == 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. \"Good for him,\" he said. ': + if ( + x["text"] + == 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for him," he said. ' + ): return ( - 'The boy continued to whip the pony , and eventually the pony threw ' + "The boy continued to whip the pony , and eventually the pony threw " 'him over. John laughed out quite loud. "Good for X ," he said.' ) # Using the span2_index, we get 'use' instead of 'it'. - if x['text'] == 'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?': + if ( + x["text"] + == "When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?" + ): return ( - 'When they had eventually calmed down a bit , and had gotten home, ' - 'Mr. Farley put the magic pebble in an iron safe . Some day they might ' - 'want to use X , but really for now, what more could they wish for?' + "When they had eventually calmed down a bit , and had gotten home, " + "Mr. Farley put the magic pebble in an iron safe . Some day they might " + "want to use X , but really for now, what more could they wish for?" 
) return create_input() class WSCPostprocess(Filter): - def __init__(self, **kwargs): self.determiners = { - "a", "an", "few", "her", "his", "each", "every", "many", "much", "my", - "our", "some", "that", "the", "their", "these", "this", "those", "which", - "whose", "your" + "a", + "an", + "few", + "her", + "his", + "each", + "every", + "many", + "much", + "my", + "our", + "some", + "that", + "the", + "their", + "these", + "this", + "those", + "which", + "whose", + "your", } def clean(self, s): @@ -81,7 +103,8 @@ class WSCPostprocess(Filter): # Handle cases where the prediction is "fuzzy bunny" and the referent is # "bunny". predicted_referent = prediction_words.issubset( - referent_words) or referent_words.issubset(prediction_words) + referent_words + ) or referent_words.issubset(prediction_words) filtered_resps.append(predicted_referent) -- GitLab From a3eb639302959fabf04d734d4187d3f03b0d57bb Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 13:41:41 +0000 Subject: [PATCH 046/264] fixed group to int --- lm_eval/tasks/super_glue/multirc/t5_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lm_eval/tasks/super_glue/multirc/t5_utils.py b/lm_eval/tasks/super_glue/multirc/t5_utils.py index 592507e8..06812d8f 100644 --- a/lm_eval/tasks/super_glue/multirc/t5_utils.py +++ b/lm_eval/tasks/super_glue/multirc/t5_utils.py @@ -39,11 +39,10 @@ def em(predictions, references): # This is a passthrough function else not bool(reference) ) - return (_group, prediction, reference) + return (int(_group), prediction, reference) def agg_em(items): - grouped_values = collections.defaultdict(lambda: ([], [])) for group, prediction, reference in items: grouped_values[group][0].append(reference) -- GitLab From ec4bc4930f30ec370a0c8a9a35f1ad26f1e322cc Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 15:13:24 +0000 Subject: [PATCH 047/264] reformat --- lm_eval/tasks/super_glue/multirc/t5_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/super_glue/multirc/t5_utils.py b/lm_eval/tasks/super_glue/multirc/t5_utils.py index 06812d8f..ac99aaf9 100644 --- a/lm_eval/tasks/super_glue/multirc/t5_utils.py +++ b/lm_eval/tasks/super_glue/multirc/t5_utils.py @@ -39,7 +39,7 @@ def em(predictions, references): # This is a passthrough function else not bool(reference) ) - return (int(_group), prediction, reference) + return (_group, prediction, reference) def agg_em(items): -- GitLab From 1d77a7b816f7803b7de066e34c485e529176d4b8 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 15:20:47 +0000 Subject: [PATCH 048/264] added info --- lm_eval/tasks/super_glue/README.md | 69 ++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 lm_eval/tasks/super_glue/README.md diff --git a/lm_eval/tasks/super_glue/README.md b/lm_eval/tasks/super_glue/README.md new file mode 100644 index 00000000..74e149a7 --- /dev/null +++ b/lm_eval/tasks/super_glue/README.md @@ -0,0 +1,69 @@ +# SuperGLUE + +### Paper + +Title: `SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems` +Abstract: `https://w4ngatang.github.io/static/papers/superglue.pdf` + +SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language +understanding tasks. 
+
+Homepage: https://super.gluebenchmark.com/
+
+### Citation
+
+```
+@inproceedings{NEURIPS2019_4496bf24,
+ author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
+ booktitle = {Advances in Neural Information Processing Systems},
+ editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
+ pages = {},
+ publisher = {Curran Associates, Inc.},
+ title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
+ url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
+ volume = {32},
+ year = {2019}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `super-glue-lm-eval-v1`: SuperGLUE eval adapted from LM Eval V1
+* `super-glue-t5-prompt`: SuperGLUE prompt and evaluation that matches the T5 paper (if using `accelerate`, evaluation will error when `record` is included)
+
+#### Tasks
+
+* `super-glue-lm-eval-v1`
+  - `boolq`
+  - `cb`
+  - `copa`
+  - `multirc`
+  - `record`
+  - `rte`
+  - `wic`
+  - `wsc`
+
+* `super-glue-t5-prompt`
+  - `super_glue-boolq-t5-prompt`
+  - `super_glue-cb-t5-prompt`
+  - `super_glue-copa-t5-prompt`
+  - `super_glue-multirc-t5-prompt`
+  - `super_glue-record-t5-prompt`
+  - `super_glue-rte-t5-prompt`
+  - `super_glue-wic-t5-prompt`
+  - `super_glue-wsc-t5-prompt`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
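With these names registered, the T5-prompt variants can be selected directly; a sketch under the same API assumptions as the user-guide example earlier (the `t5-v1_1` checkpoint is likewise an assumption, not pinned by this series):

```python
from lm_eval import evaluator

# Task names come from the YAMLs in this series; everything else here
# (entry point, model name, checkpoint) is assumed rather than documented.
results = evaluator.simple_evaluate(
    model="hf",
    model_args="pretrained=google/t5-v1_1-base",
    tasks=["super_glue-rte-t5-prompt", "super_glue-wsc-t5-prompt"],
)
for task_name, metrics in results["results"].items():
    print(task_name, metrics)
```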
-- GitLab From 5aec6f26851a14720ae1dee815aae71b494df2d5 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 15:33:03 +0000 Subject: [PATCH 049/264] add benchmark with t5x --- lm_eval/tasks/super_glue/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lm_eval/tasks/super_glue/README.md b/lm_eval/tasks/super_glue/README.md index 74e149a7..c8e80771 100644 --- a/lm_eval/tasks/super_glue/README.md +++ b/lm_eval/tasks/super_glue/README.md @@ -35,6 +35,14 @@ Homepage: https://super.gluebenchmark.com/ #### Tasks +Comparison between validation split score on T5x and LM-Eval (T5x models converted to HF) +| T5V1.1 Base | SGLUE | BoolQ | CB | Copa | MultiRC | ReCoRD | RTE | WiC | WSC | +| ----------- | ------| ----- | --------- | ---- | ------- | ------ | --- | --- | --- | +| T5x | 69.47 | 78.47(acc) | 83.93(f1) 87.5(acc) | 50(acc) | 73.81(f1) 33.26(em) | 70.09(em) 71.34(f1) | 78.7(acc) | 63.64(acc) | 75(acc) | +| LM-Eval | 71.35 | 79.36(acc) | 83.63(f1) 87.5(acc) | 63(acc) | 73.45(f1) 33.26(em) | 69.85(em) 68.86(f1) | 78.34(acc) | 65.83(acc) | 75.96(acc) | + + + * `super-glue-lm-eval-v1` - `boolq` - `cb` -- GitLab From b7082722ccc69fc6bb80459f4b5e461f2bcb6282 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 15:36:49 +0000 Subject: [PATCH 050/264] removed unused line --- lm_eval/tasks/super_glue/cb/t5_utils.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm_eval/tasks/super_glue/cb/t5_utils.py index 6ac30c52..caf84390 100644 --- a/lm_eval/tasks/super_glue/cb/t5_utils.py +++ b/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -16,7 +16,6 @@ def agg_mean_3class_f1(items): """Computes the unweighted average of the F1 per class.""" metric_str = "fbeta_score" - metric_post_process_fn = lambda x: x metric_fn_kwargs = { "beta": 1, "labels": range(3), @@ -26,8 +25,6 @@ def agg_mean_3class_f1(items): def _fn(predictions, references): metric_fn = getattr(sklearn.metrics, metric_str) metric_val = metric_fn(references, predictions, **metric_fn_kwargs) - if metric_post_process_fn is not None: - metric_val = metric_post_process_fn(metric_val) return metric_val return _fn(predictions, references) -- GitLab From 20f0f313c2ec5b7493b0441c0fb683749de7b349 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 25 Aug 2023 16:17:52 +0000 Subject: [PATCH 051/264] add wsc273 --- lm_eval/tasks/wsc273/README.md | 60 +++++++++++++++++++++++++++++++ lm_eval/tasks/wsc273/default.yaml | 15 ++++++++ lm_eval/tasks/wsc273/utils.py | 36 +++++++++++++++++++ 3 files changed, 111 insertions(+) create mode 100644 lm_eval/tasks/wsc273/README.md create mode 100644 lm_eval/tasks/wsc273/default.yaml create mode 100644 lm_eval/tasks/wsc273/utils.py diff --git a/lm_eval/tasks/wsc273/README.md b/lm_eval/tasks/wsc273/README.md new file mode 100644 index 00000000..59e73047 --- /dev/null +++ b/lm_eval/tasks/wsc273/README.md @@ -0,0 +1,60 @@ +# WSC273 + +### Paper + +Title: `The Winograd Schema Challenge` + +Abstract: http://commonsensereasoning.org/2011/papers/Levesque.pdf + +A Winograd schema is a pair of sentences that differ in only one or two words +and that contain an ambiguity that is resolved in opposite ways in the two +sentences and requires the use of world knowledge and reasoning for its resolution. +The Winograd Schema Challenge 273 is a collection of 273 such Winograd schemas. 
+
+NOTE: This evaluation of the Winograd Schema Challenge is based on `partial evaluation`
+as described by Trinh & Le in A Simple Method for Commonsense Reasoning (2018).
+See: https://arxiv.org/abs/1806.02847
+
+Homepage: https://cs.nyu.edu/~davise/papers/WinogradSchemas/WS.html
+
+
+### Citation
+
+```
+@inproceedings{ea01b9c0db064caca6986b925d75f2bb,
+    title = "The winograd schema challenge",
+    abstract = "In this paper, we present an alternative to the Turing Test that has some conceptual and practical advantages. A Wino-grad schema is a pair of sentences that differ only in one or two words and that contain a referential ambiguity that is resolved in opposite directions in the two sentences. We have compiled a collection of Winograd schemas, designed so that the correct answer is obvious to the human reader, but cannot easily be found using selectional restrictions or statistical techniques over text corpora. A contestant in the Winograd Schema Challenge is presented with a collection of one sentence from each pair, and required to achieve human-level accuracy in choosing the correct disambiguation.",
+    author = "Levesque, {Hector J.} and Ernest Davis and Leora Morgenstern",
+    year = "2012",
+    language = "English (US)",
+    isbn = "9781577355601",
+    series = "Proceedings of the International Conference on Knowledge Representation and Reasoning",
+    publisher = "Institute of Electrical and Electronics Engineers Inc.",
+    pages = "552--561",
+    booktitle = "13th International Conference on the Principles of Knowledge Representation and Reasoning, KR 2012",
+    note = "13th International Conference on the Principles of Knowledge Representation and Reasoning, KR 2012 ; Conference date: 10-06-2012 Through 14-06-2012",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of any group yet.
+
+#### Tasks
+
+* `wsc273`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
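To make the partial-evaluation scheme concrete before the task YAML below: each candidate referent is substituted for the pronoun, and only the continuation after the pronoun is scored. A toy illustration in plain Python (not library code), mirroring the `doc_to_choice` and `doc_to_target` templates that follow:

```python
# Toy illustration of "partial evaluation"; the names and the example
# sentence are illustrative only.
text = "The trophy doesn't fit in the suitcase because it is too big."
pronoun = "it"
pronoun_loc = text.index(" it is") + 1  # offset of the ambiguous pronoun
options = ["the trophy", "the suitcase"]

choices = [text[:pronoun_loc] + option for option in options]
target = text[pronoun_loc + len(pronoun):]  # " is too big."
# The model compares log P(target | choice) across the two choices and
# picks the referent whose continuation is more likely.
```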
diff --git a/lm_eval/tasks/wsc273/default.yaml b/lm_eval/tasks/wsc273/default.yaml
new file mode 100644
index 00000000..5fb224e8
--- /dev/null
+++ b/lm_eval/tasks/wsc273/default.yaml
@@ -0,0 +1,15 @@
+task: wsc273
+dataset_path: winograd_wsc
+dataset_name: wsc273
+output_type: multiple_choice
+test_split: test
+doc_to_text: label
+process_docs: !function utils.process_doc
+doc_to_target: "{% set index = pronoun_loc + pronoun | length %}{{text[index:]}}"
+doc_to_choice: "{% set template = text[:pronoun_loc] %}{{[template+options[0], template+options[1]]}}"
+should_decontaminate: true
+doc_to_decontamination_query: text
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
diff --git a/lm_eval/tasks/wsc273/utils.py b/lm_eval/tasks/wsc273/utils.py
new file mode 100644
index 00000000..6e47dc97
--- /dev/null
+++ b/lm_eval/tasks/wsc273/utils.py
@@ -0,0 +1,36 @@
+
+upper_pronouns = [
+    "A",
+    "An",
+    "The",
+    "She",
+    "He",
+    "It",
+    "They",
+    "My",
+    "His",
+    "Her",
+    "Their",
+]
+
+def process_doc(dataset):
+
+    def process_fn(doc):
+        # The HF implementation of `wsc273` is not `partial evaluation` friendly.
+        doc["text"] = doc["text"].replace("  ", " ")
+        doc["options"][0] = __normalize_option(doc, doc["options"][0])
+        doc["options"][1] = __normalize_option(doc, doc["options"][1])
+        return doc
+
+    return dataset.map(process_fn)
+
+def __normalize_option(doc, option):
+    # Append `'s` to possessive determiner based options.
+    if doc["pronoun"].lower() in ["my", "his", "her", "our", "their"]:
+        option += "'s"
+    # Appropriately lowercase the pronoun in the option.
+    pronoun = option.split()[0]
+    start_of_sentence = doc["text"][doc["pronoun_loc"] - 2] == "."
+    if not start_of_sentence and pronoun in upper_pronouns:
+        return option.replace(pronoun, pronoun.lower())
+    return option
\ No newline at end of file
-- 
GitLab


From da85f290dca3deedde2e88cb870afadf7aab9ad3 Mon Sep 17 00:00:00 2001
From: Ethan Smith
Date: Fri, 25 Aug 2023 14:46:03 -0700
Subject: [PATCH 052/264] First clean mypy run, disable all errors

---
 .pre-commit-config.yaml |  6 ++++++
 mypy.ini                | 29 +++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)
 create mode 100644 mypy.ini

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8455eb0d..31ef5ca0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -43,3 +43,9 @@ repos:
           .*\.json|ignore.txt
         )$
         args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.5.1
+    hooks:
+      - id: mypy
+        additional_dependencies: [".[sentencepiece,multilingual,promptsource,gptq]", "types-PyYAML", "types-requests"]
+        exclude: ^tests/.*$
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 00000000..99a101c3
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,29 @@
+[mypy]
+python_version = 3.9
+show_traceback = True
+check_untyped_defs = True
+no_implicit_reexport = True
+warn_unreachable = True
+warn_unused_configs = True
+warn_unused_ignores = True
+warn_redundant_casts = True
+
+# We ignore errors everywhere to gradually add type annotations
+
+[mypy-lm_eval.*]
+ignore_errors = True
+
+[mypy-lm_eval.api.*]
+ignore_errors = True
+
+[mypy-lm_eval.prompts.*]
+ignore_errors = True
+
+[mypy-lm_eval.models.*]
+ignore_errors = True
+
+[mypy-scripts.*]
+ignore_errors = True
+
+[mypy-main]
+ignore_errors = True
\ No newline at end of file
-- 
GitLab


From fc69d84f0fdeed782fa4971b08f096d80fa9b8bc Mon Sep 17 00:00:00 2001
From: Ethan Smith
Date: Fri, 25 Aug 2023 14:46:34 -0700
Subject: [PATCH
053/264] Add suggestions from autotyping This adds a bunch of simple annotations suggested by https://github.com/JelleZijlstra/autotyping. --- lm_eval/api/filter.py | 5 +-- lm_eval/api/instance.py | 2 +- lm_eval/api/metrics.py | 2 +- lm_eval/api/model.py | 10 ++--- lm_eval/api/samplers.py | 8 ++-- lm_eval/api/task.py | 28 ++++-------- lm_eval/benchmarks/__init__.py | 3 +- lm_eval/decontamination/archiver.py | 31 ++++++++----- lm_eval/decontamination/decontaminate.py | 7 +-- lm_eval/decontamination/janitor.py | 44 +++++++++---------- lm_eval/evaluator.py | 18 +++----- lm_eval/filters/decontamination.py | 4 +- lm_eval/filters/extraction.py | 7 +-- lm_eval/filters/selection.py | 7 ++- lm_eval/models/anthropic_llms.py | 5 +-- lm_eval/models/dummy.py | 2 +- lm_eval/models/huggingface.py | 12 ++--- lm_eval/models/openai_completions.py | 8 ++-- lm_eval/models/textsynth.py | 6 +-- lm_eval/prompts/__init__.py | 4 +- lm_eval/tasks/__init__.py | 8 ++-- lm_eval/tasks/glue/mnli/utils.py | 2 +- lm_eval/tasks/hendrycks_ethics/utils.py | 2 +- lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py | 4 +- lm_eval/utils.py | 25 +++++------ main.py | 4 +- 26 files changed, 122 insertions(+), 136 deletions(-) diff --git a/lm_eval/api/filter.py b/lm_eval/api/filter.py index 393623a7..6bcee4d3 100644 --- a/lm_eval/api/filter.py +++ b/lm_eval/api/filter.py @@ -13,7 +13,7 @@ class Filter: """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: """ Can define custom behavior here, if an individual instantiation of a Filter class should have state. """ @@ -40,8 +40,7 @@ class FilterEnsemble: name: str filters: List[Filter] - def apply(self, instances: List[Instance]): - + def apply(self, instances: List[Instance]) -> None: resps = [ inst.resps for inst in instances ] # operate just on the model responses diff --git a/lm_eval/api/instance.py b/lm_eval/api/instance.py index f56b712b..f3e7f005 100644 --- a/lm_eval/api/instance.py +++ b/lm_eval/api/instance.py @@ -19,7 +19,7 @@ class Instance: doc_id: str = None repeats: str = None - def __post_init__(self): + def __post_init__(self) -> None: # unpack metadata field self.task_name, self.doc_id, self.repeats = self.metadata diff --git a/lm_eval/api/metrics.py b/lm_eval/api/metrics.py index c618bbcf..116f20e1 100644 --- a/lm_eval/api/metrics.py +++ b/lm_eval/api/metrics.py @@ -302,7 +302,7 @@ def _sacreformat(refs, preds): class _bootstrap_internal: - def __init__(self, f, n): + def __init__(self, f, n) -> None: self.f = f self.n = n diff --git a/lm_eval/api/model.py b/lm_eval/api/model.py index 8ca7c53b..2bcce8dc 100644 --- a/lm_eval/api/model.py +++ b/lm_eval/api/model.py @@ -13,7 +13,7 @@ from lm_eval.logger import eval_logger class LM(abc.ABC): - def __init__(self): + def __init__(self) -> None: """Defines the interface that should be implemented by all LM subclasses. LMs are assumed to take text (strings) as input and yield strings as output (inputs/outputs should be tokenization-agnostic.) @@ -133,7 +133,7 @@ class LM(abc.ABC): # not support multi-device parallelism nor expect it. 
return self._world_size - def set_cache_hook(self, cache_hook): + def set_cache_hook(self, cache_hook) -> None: self.cache_hook = cache_hook @@ -144,14 +144,14 @@ def hash_args(attr, args): class CacheHook: - def __init__(self, cachinglm): + def __init__(self, cachinglm) -> None: if cachinglm is None: self.dbdict = None return self.dbdict = cachinglm.dbdict - def add_partial(self, attr, req, res): + def add_partial(self, attr, req, res) -> None: if self.dbdict is None: return hsh = hash_args(attr, req) @@ -159,7 +159,7 @@ class CacheHook: class CachingLM: - def __init__(self, lm, cache_db): + def __init__(self, lm, cache_db) -> None: """LM wrapper that returns cached results if they exist, and uses the underlying LM if not. :param lm: LM diff --git a/lm_eval/api/samplers.py b/lm_eval/api/samplers.py index 60511928..59e21f07 100644 --- a/lm_eval/api/samplers.py +++ b/lm_eval/api/samplers.py @@ -1,6 +1,5 @@ class Sampler: - def __init__(self, docs, task, fewshot_indices=None, rnd=None): - + def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None: self.rnd = rnd assert self.rnd, "must pass rnd to FewShotSampler!" @@ -19,7 +18,6 @@ class Sampler: self.docs = self.docs.select(fewshot_indices) def get_context(self, doc, num_fewshot): - # draw an extra fewshot sample if using same split as evaluating on n_samples = ( num_fewshot + 1 @@ -74,7 +72,7 @@ class Sampler: class BalancedSampler(Sampler): - def sample(self, n): + def sample(self, n) -> None: """ TODO: this should return approximately class-balanced samples from our fewshot examples. TODO: what order should they be in? maybe random? @@ -84,7 +82,7 @@ class BalancedSampler(Sampler): class ManualSampler(Sampler): - def sample(self, n): + def sample(self, n) -> None: """ """ pass diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 50c53764..aaa8c441 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -88,8 +88,7 @@ class TaskConfig(dict): metadata: str = None # by default, not used in the code. allows for users to pass arbitrary info to tasks - def __post_init__(self): - + def __post_init__(self) -> None: if self.generation_kwargs is not None: if self.output_type != "greedy_until": eval_logger.warning( @@ -171,7 +170,7 @@ class Task(abc.ABC): cache_dir=None, download_mode=None, config=None, - ): + ) -> None: """ :param data_dir: str Stores the path to a local folder containing the `Task`'s data files. @@ -213,7 +212,7 @@ class Task(abc.ABC): list(self.fewshot_docs()), self, rnd=random.Random(1234) ) - def download(self, data_dir=None, cache_dir=None, download_mode=None): + def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None: """Downloads and returns the task dataset. Override this method to download the dataset from a custom API. @@ -322,7 +321,7 @@ class Task(abc.ABC): return rnd.sample(self._training_docs, k) - def doc_to_decontamination_query(self, doc): + def doc_to_decontamination_query(self, doc) -> None: print( "Override doc_to_decontamination_query with document specific decontamination query." 
) @@ -336,7 +335,7 @@ class Task(abc.ABC): def doc_to_target(self, doc): pass - def build_all_requests(self, limit=None, rank=None, world_size=None): + def build_all_requests(self, limit=None, rank=None, world_size=None) -> None: """Build a set of Instances for a task, and store them in task.instances""" if self.has_test_docs(): docs = self.test_docs() @@ -472,7 +471,6 @@ class Task(abc.ABC): return labeled_examples + str(example) def apply_filters(self): - if hasattr(self, "_filters"): for f in self._filters: f.apply(self._instances) @@ -498,7 +496,7 @@ class ConfigurableTask(Task): def __init__( self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None - ): # TODO no super() call here + ) -> None: # TODO no super() call here # Get pre-configured attributes self._config = self.CONFIG @@ -570,7 +568,6 @@ class ConfigurableTask(Task): "aggregation" ] else: - INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()} metric_agg = get_default_aggregation(metric_name) eval_logger.warning( @@ -683,8 +680,7 @@ class ConfigurableTask(Task): f'Both target_delimiter and target choice: "{choice}" does not have whitespace, ignore if the language you are evaluating on does not require/use whitespace' ) - def download(self, dataset_kwargs=None): - + def download(self, dataset_kwargs=None) -> None: self.dataset = datasets.load_dataset( path=self.DATASET_PATH, name=self.DATASET_NAME, @@ -767,7 +763,6 @@ class ConfigurableTask(Task): return doc def doc_to_text(self, doc): - if self.prompt is not None: doc_to_text = self.prompt else: @@ -802,7 +797,6 @@ class ConfigurableTask(Task): raise TypeError def doc_to_target(self, doc: dict) -> Union[int, str, list]: - if self.prompt is not None: doc_to_target = self.prompt else: @@ -844,7 +838,6 @@ class ConfigurableTask(Task): raise TypeError def doc_to_choice(self, doc: Any) -> List[str]: - if self.prompt is not None: doc_to_choice = self.prompt elif self._config.doc_to_choice is None: @@ -888,13 +881,11 @@ class ConfigurableTask(Task): def construct_requests( self, doc: dict, ctx: str, **kwargs ) -> Union[List[Instance], Instance]: - if self.OUTPUT_TYPE == "loglikelihood": arguments = (ctx, self.doc_to_target(doc)) elif self.OUTPUT_TYPE == "loglikelihood_rolling": arguments = (self.doc_to_target(doc),) elif self.OUTPUT_TYPE == "multiple_choice": - choices = self.doc_to_choice(doc) target_delimiter = self._config.target_delimiter if self.multiple_input: @@ -945,7 +936,6 @@ class ConfigurableTask(Task): ) def process_results(self, doc, results): - if callable(self._config.process_results): return self._config.process_results(doc, results) @@ -980,7 +970,6 @@ class ConfigurableTask(Task): ), } elif self.OUTPUT_TYPE == "multiple_choice": - lls, is_greedy = zip(*results) # retrieve choices in List[str] form, to compute choice lengths, etc. 
@@ -1034,7 +1023,6 @@ class ConfigurableTask(Task): result_dict["acc_mutual_info"] = acc_mutual_info elif self.OUTPUT_TYPE == "greedy_until": - gold = self.doc_to_target(doc) if self._config.doc_to_choice is not None: # If you set doc_to_choice, @@ -1164,7 +1152,7 @@ class PerplexityTask(Task): def doc_to_decontamination_query(self, doc): return doc - def doc_to_text(self, doc): + def doc_to_text(self, doc) -> str: return "" def doc_to_target(self, doc): diff --git a/lm_eval/benchmarks/__init__.py b/lm_eval/benchmarks/__init__.py index 4924691d..77dcdef0 100644 --- a/lm_eval/benchmarks/__init__.py +++ b/lm_eval/benchmarks/__init__.py @@ -11,8 +11,7 @@ from lm_eval.api.registry import ( ) -def include_benchmarks(task_dir): - +def include_benchmarks(task_dir: str) -> None: for root, subdirs, file_list in os.walk(task_dir): if (subdirs == [] or subdirs == ["__pycache__"]) and (len(file_list) > 0): for f in file_list: diff --git a/lm_eval/decontamination/archiver.py b/lm_eval/decontamination/archiver.py index 488a55dd..3b5f09f5 100644 --- a/lm_eval/decontamination/archiver.py +++ b/lm_eval/decontamination/archiver.py @@ -1,4 +1,5 @@ import os +from typing import Any import zstandard import json import jsonlines @@ -9,7 +10,7 @@ import tqdm from pathlib import Path -def json_serial(obj): +def json_serial(obj: Any) -> str: """JSON serializer for objects not serializable by default json code""" if isinstance(obj, (datetime.datetime,)): @@ -19,7 +20,7 @@ def json_serial(obj): # Modified version of lm_dataformat Archive for single file. class Archive: - def __init__(self, file_path, compression_level=3): + def __init__(self, file_path: str, compression_level: int = 3) -> None: self.file_path = file_path dir_name = os.path.dirname(file_path) if dir_name: @@ -28,7 +29,7 @@ class Archive: self.cctx = zstandard.ZstdCompressor(level=compression_level) self.compressor = self.cctx.stream_writer(self.fh) - def add_data(self, data, meta={}): + def add_data(self, data, meta={}) -> None: self.compressor.write( json.dumps({"text": data, "meta": meta}, default=json_serial).encode( "UTF-8" @@ -36,7 +37,7 @@ class Archive: + b"\n" ) - def commit(self): + def commit(self) -> None: self.compressor.flush(zstandard.FLUSH_FRAME) self.fh.flush() self.fh.close() @@ -44,10 +45,16 @@ class Archive: # Modified version of lm_dataformat Reader with self.fh set, allowing peeking for tqdm. class Reader: - def __init__(self): + def __init__(self) -> None: pass - def read(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner="\n\n"): + def read( + self, + file, + get_meta: bool = False, + autojoin_paragraphs: bool = True, + para_joiner: str = "\n\n", + ): with open(file, "rb") as fh: self.fh = fh cctx = zstandard.ZstdDecompressor() @@ -72,7 +79,7 @@ class Reader: class TextArchive: - def __init__(self, file_path, mode="rb+"): + def __init__(self, file_path, mode: str = "rb+") -> None: self.file_path = file_path dir_name = os.path.dirname(file_path) if dir_name: @@ -83,21 +90,21 @@ class TextArchive: self.fh = open(self.file_path, mode) - def add_data(self, data): + def add_data(self, data) -> None: self.fh.write(data.encode("UTF-8") + b"\n") - def commit(self): + def commit(self) -> None: self.fh.flush() self.fh.close() class TextReader: - def __init__(self, file_path): + def __init__(self, file_path) -> None: self.file_path = file_path # Optimized mmap read with infrequent tqdm updates to maintain speed # Tested up to 250MB/s. 
- def read_tqdm(self, update_frequency=10000): + def read_tqdm(self, update_frequency: int = 10000): current_file_position = 0 line_counter = 0 with open(self.file_path, "r") as fh, tqdm.tqdm( @@ -149,7 +156,7 @@ class TextReader: # Optimized for speed. Decompresses the archive in shell before # using the mmap'd TextReader. class ZStdTextReader: - def __init__(self, file): + def __init__(self, file) -> None: self.file = file def read_tqdm(self): diff --git a/lm_eval/decontamination/decontaminate.py b/lm_eval/decontamination/decontaminate.py index ce814460..447eae52 100644 --- a/lm_eval/decontamination/decontaminate.py +++ b/lm_eval/decontamination/decontaminate.py @@ -11,7 +11,7 @@ from .archiver import ZStdTextReader # Was used for testing the evaluator decoupled from the full logic below -def get_train_overlap_stub(docs, ngrams_path, ngrams_n_size): +def get_train_overlap_stub(docs: dict, ngrams_path: str, ngrams_n_size: str): simulated_overlap = 0.1 contaminated = int(len(docs) * simulated_overlap) return random.sample(range(len(docs)), contaminated) @@ -25,6 +25,7 @@ def get_train_overlap_stub(docs, ngrams_path, ngrams_n_size): # scripts are an info.json file containing the n_gram_size (13) and a bunch of "ngrams_{x}.bkt.txt.sorted.zst" # files. These should exist in the "ngrams_path" provided to this function. + # Algorithm: # 1. Build lookups for each dataset {ngram: list(document_ids)} # 2. Merge into an overall lookup {ngram: [(task_name, task_set, doc_ids),]} @@ -33,7 +34,7 @@ def get_train_overlap_stub(docs, ngrams_path, ngrams_n_size): # 4. Strip the task_set from the dictionary keys and return # # We cache the task+set lookups as well as the overlaps. -def get_train_overlap(docs_by_task_set, ngrams_path, limit): +def get_train_overlap(docs_by_task_set: dict, ngrams_path: str, limit: int) -> dict: # return get_train_overlap_stub(docs, ngrams_path, ngrams_n_size) info_dict_path = os.path.join(ngrams_path, "info.json") @@ -46,7 +47,7 @@ def get_train_overlap(docs_by_task_set, ngrams_path, limit): print("Building Lookups...") start = time.perf_counter() - def get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit): + def get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit) -> str: return f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.overlaps" lookups = {} diff --git a/lm_eval/decontamination/janitor.py b/lm_eval/decontamination/janitor.py index 458ee223..76bd39d4 100644 --- a/lm_eval/decontamination/janitor.py +++ b/lm_eval/decontamination/janitor.py @@ -1,9 +1,9 @@ import re import string -import timeit import pickle import traceback from pprint import pprint +from typing import Iterator, Sequence, TypeVar # This is a cpp module. 
Compile janitor_util.cpp with: # c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) janitor_util.cpp -o janitor_util$(python3-config --extension-suffix) -undefined dynamic_lookup @@ -16,10 +16,11 @@ except Exception: traceback.print_exc() JANITOR_CPP = False +T = TypeVar("T") # Implementation from nltk source # https://www.nltk.org/_modules/nltk/util.html -def form_ngrams(sequence, n): +def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[tuple[T, ...]]: history = [] while n > 1: # PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator @@ -36,7 +37,7 @@ def form_ngrams(sequence, n): del history[0] -def word_ngrams(s, n): +def word_ngrams(s: str, n: int) -> Iterator[str]: """Splits a string into ngram words""" tokens = s.split() # not a generator :( ngram_seqs = form_ngrams(iter(tokens), n) @@ -68,14 +69,14 @@ def word_ngrams(s, n): # https://stackoverflow.com/questions/13734451/string-split-with-indices-in-python -def split_indices(s): +def split_indices(s: str) -> Iterator[tuple[str, tuple[int, int]]]: """Splits a string on whitespaces and records the indices of each in the original string. @:return generator((word, (start_idx, end_idx)), ...) """ return ((m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r"\S+", s)) -def word_ngrams_indices(s, n): +def word_ngrams_indices(s: str, n: int) -> Iterator[tuple[str, tuple[int, int]]]: """Splits a string into pairs of (ngram words, their start/end indices)""" tokens_with_indices = split_indices(s) @@ -104,16 +105,15 @@ def word_ngrams_indices(s, n): class Janitor: - # FIXME delete_chars: Should anything else go here? Special chars? def __init__( self, - ngram_n=13, - window_to_remove=200, - too_dirty_cutoff=10, - minimum_slice_length=200, - delete_chars=string.punctuation, - ): + ngram_n: int = 13, + window_to_remove: int = 200, + too_dirty_cutoff: int = 10, + minimum_slice_length: int = 200, + delete_chars: str = string.punctuation, + ) -> None: self.ngram_n = ngram_n self.window_to_remove = window_to_remove self.too_dirty_cutoff = too_dirty_cutoff @@ -135,11 +135,11 @@ class Janitor: # I/O for saving contamination ngrams ############## - def save_contamination_ngrams(self, filename): + def save_contamination_ngrams(self, filename: str) -> None: with open(filename, "wb") as fp: pickle.dump(filename, fp) - def load_contamination_ngrams(self, filename): + def load_contamination_ngrams(self, filename: str) -> None: with open(filename, "rb") as fp: self.dirt_ngrams = pickle.load(fp) @@ -147,7 +147,7 @@ class Janitor: # Call these :) ############## - def register_contaminant(self, dirt_string): + def register_contaminant(self, dirt_string: str) -> None: """Register a string as contamination to be removed, e.g. a test set This breaks the dirt_string into ngrams to store for future cleaning""" if JANITOR_CPP: @@ -156,7 +156,7 @@ class Janitor: print("WARNING: Janitor running in python mode") return self.register_contaminant_python(dirt_string) - def clean(self, dirty_string): + def clean(self, dirty_string: str) -> list[str]: """Clean a string (e.g. a training set) by removing all ngrams previously registered as contaminants. 
Returns a list of clean chunks, or empty if the string was too dirty""" @@ -166,7 +166,7 @@ class Janitor: print("WARNING: Janitor running in python mode") return self.clean_python(dirty_string) - def _split_chunks(self, dirty_string, dirty_parts): + def _split_chunks(self, dirty_string: str, dirty_parts: Sequence[tuple]) -> list[str]: clean_chunks = [] splice_idx = 0 end = -1 @@ -189,12 +189,12 @@ class Janitor: # Fast C++ ############## - def register_contaminant_cpp(self, dirt_string): + def register_contaminant_cpp(self, dirt_string) -> None: self.dirt_ngrams.update( janitor_util.clean_ngram(dirt_string, self.delete_chars, self.ngram_n) ) - def clean_cpp(self, dirty_string): + def clean_cpp(self, dirty_string: str) -> list[str]: contamination_indices = janitor_util.clean_ngram_with_indices( dirty_string, self.delete_chars, self.ngram_n ) @@ -204,15 +204,15 @@ class Janitor: # Slow python ############## - def normalize_string(self, s): + def normalize_string(self, s: str) -> str: return s.translate(self.translation_table) - def register_contaminant_python(self, dirt_string): + def register_contaminant_python(self, dirt_string: str) -> None: self.dirt_ngrams.update( word_ngrams(self.normalize_string(dirt_string), self.ngram_n) ) - def clean_python(self, dirty_string): + def clean_python(self, dirty_string: str) -> list[str]: contamination_indices = ( (None, *idx_pair) for dirty_ngram, idx_pair in word_ngrams_indices(dirty_string, self.ngram_n) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 7aef2acd..1efe98ce 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -42,11 +42,11 @@ def simple_evaluate( device=None, use_cache=None, limit=None, - bootstrap_iters=100000, - check_integrity=False, + bootstrap_iters: int = 100000, + check_integrity: bool = False, decontamination_ngrams_path=None, - write_out=False, - log_samples=True, + write_out: bool = False, + log_samples: bool = True, ): """Instantiate and evaluate a model on a list of tasks. @@ -117,7 +117,6 @@ def simple_evaluate( task_dict = lm_eval.tasks.get_task_dict(tasks) for task_name in task_dict.keys(): - task_obj = task_dict[task_name] if type(task_obj) == tuple: group, task_obj = task_obj @@ -175,10 +174,10 @@ def evaluate( lm, task_dict, limit=None, - bootstrap_iters=100000, + bootstrap_iters: int = 100000, decontamination_ngrams_path=None, - write_out=False, - log_samples=True, + write_out: bool = False, + log_samples: bool = True, ): """Instantiate and evaluate a model on a list of tasks. 
@@ -224,7 +223,6 @@ def evaluate( # get lists of each type of request for task_name, task in task_dict.items(): - if type(task) == tuple: group, task = task task_groups[task_name] = group @@ -349,7 +347,6 @@ def evaluate( # if multigpu, then gather data across all ranks # first gather logged samples across all ranks for task_name, task_samples in list(samples.items()): - full_samples = [None] * lm.world_size torch.distributed.all_gather_object(full_samples, task_samples) @@ -358,7 +355,6 @@ def evaluate( # then collect metrics across all ranks vals_torch = collections.defaultdict(list) for (task_name, key, metric), items in vals.items(): - numitem = 0 if type(items[0]) == tuple: numitem = len(items[0]) diff --git a/lm_eval/filters/decontamination.py b/lm_eval/filters/decontamination.py index d95db3bb..a0cf6a1d 100644 --- a/lm_eval/filters/decontamination.py +++ b/lm_eval/filters/decontamination.py @@ -9,7 +9,7 @@ class DecontaminationFilter(Filter): name = "track_decontamination" - def __init__(self, path): + def __init__(self, path) -> None: """ TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path"). @@ -17,7 +17,7 @@ class DecontaminationFilter(Filter): """ self._decontam_results = None - def apply(self, reps): + def apply(self, reps) -> None: """ Return {"no_contamination", "only_contamination"} keys for the 2 different subsets """ diff --git a/lm_eval/filters/extraction.py b/lm_eval/filters/extraction.py index 1eefc2f6..4821457f 100644 --- a/lm_eval/filters/extraction.py +++ b/lm_eval/filters/extraction.py @@ -6,7 +6,9 @@ from lm_eval.api.filter import Filter class RegexFilter(Filter): """ """ - def __init__(self, regex_pattern=r"#### (\-?[0-9\.\,]+)", fallback="[invalid]"): + def __init__( + self, regex_pattern: str = r"#### (\-?[0-9\.\,]+)", fallback: str = "[invalid]" + ) -> None: """ pass a string `regex` to run `re.compile(r"regex")` on. `fallback` defines the output returned if no matches for the regex are located. @@ -41,12 +43,11 @@ class RegexFilter(Filter): class WhitespaceFilter(Filter): """ """ - def __init__(self): + def __init__(self) -> None: pass def apply(self, resps): def filter_set(inst): - filtered_resp = [] for resp in inst: if resp.startswith(" "): diff --git a/lm_eval/filters/selection.py b/lm_eval/filters/selection.py index ef8ff5f4..54a68c38 100644 --- a/lm_eval/filters/selection.py +++ b/lm_eval/filters/selection.py @@ -4,7 +4,7 @@ from lm_eval.api.filter import Filter class TakeFirstFilter(Filter): - def __init__(self): + def __init__(self) -> None: """ Can define custom behavior here, if an individual instantiation of a Filter class should have state. """ @@ -17,8 +17,7 @@ class TakeFirstFilter(Filter): class TakeKFilter(Filter): - def __init__(self, *args, **kwargs): - + def __init__(self, *args, **kwargs) -> None: self.k = kwargs.pop("k") super().__init__(*args, **kwargs) @@ -32,7 +31,7 @@ class TakeKFilter(Filter): class MajorityVoteFilter(Filter): - def __init__(self): + def __init__(self) -> None: """ Can define custom behavior here, if an individual instantiation of a Filter class should have state. """ diff --git a/lm_eval/models/anthropic_llms.py b/lm_eval/models/anthropic_llms.py index 6dad9654..953ea913 100644 --- a/lm_eval/models/anthropic_llms.py +++ b/lm_eval/models/anthropic_llms.py @@ -76,7 +76,7 @@ class AnthropicLM(LM): max_tokens_to_sample: int = 256, temperature: float = 0, # defaults to 1 **kwargs, # top_p, top_k, etc. - ): + ) -> None: """Anthropic API wrapper. 
:param model: str @@ -135,11 +135,10 @@ please install anthropic via `pip install lm-eval[anthropic]` or `pip install -e def tok_decode(self, tokens: List[int]) -> str: return self.tokenizer.decode(tokens) - def _loglikelihood_tokens(self, requests, disable_tqdm=False): + def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False): raise NotImplementedError("No support for logits.") def greedy_until(self, requests) -> List[str]: - if not requests: return [] diff --git a/lm_eval/models/dummy.py b/lm_eval/models/dummy.py index 9447a34b..0264e763 100644 --- a/lm_eval/models/dummy.py +++ b/lm_eval/models/dummy.py @@ -5,7 +5,7 @@ from lm_eval.api.registry import register_model @register_model("dummy") class DummyLM(LM): - def __init__(self): + def __init__(self) -> None: super().__init__() @classmethod diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 7fd67a8e..91df7347 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -90,7 +90,7 @@ class HFLM(LM): bnb_4bit_compute_dtype: Optional[Union[str, torch.dtype]] = None, gptq: Optional[Union[bool, str]] = False, gptq_use_triton: Optional[bool] = False, - ): + ) -> None: super().__init__() assert isinstance(device, str) @@ -334,7 +334,7 @@ class HFLM(LM): return self._DEFAULT_MAX_LENGTH @property - def max_gen_toks(self): + def max_gen_toks(self) -> int: return 256 @property @@ -353,7 +353,7 @@ class HFLM(LM): def world_size(self): return self._world_size - def _detect_batch_size(self, requests=None, pos=0): + def _detect_batch_size(self, requests=None, pos: int = 0): if requests: _, context_enc, continuation_enc = requests[pos] max_length = len( @@ -419,7 +419,7 @@ class HFLM(LM): return encoding def tok_batch_encode( - self, strings: List[str], padding_side="left", left_truncate_len=None + self, strings: List[str], padding_side: str = "left", left_truncate_len=None ): # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. 
old_padding_side = self.tokenizer.padding_side @@ -595,7 +595,9 @@ class HFLM(LM): return loglikelihoods - def _loglikelihood_tokens(self, requests, disable_tqdm=False, override_bs=None): + def _loglikelihood_tokens( + self, requests, disable_tqdm: bool = False, override_bs=None + ): # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context res = [] diff --git a/lm_eval/models/openai_completions.py b/lm_eval/models/openai_completions.py index 9f0ee0f6..eb05dd4c 100644 --- a/lm_eval/models/openai_completions.py +++ b/lm_eval/models/openai_completions.py @@ -69,7 +69,7 @@ class OpenaiCompletionsLM(LM): engine: str = "text-davinci-003", truncate: bool = False, batch_size: int = 1, - ): + ) -> None: """ :param engine: str @@ -99,12 +99,12 @@ class OpenaiCompletionsLM(LM): return self.end_of_text_token_id @property - def max_length(self): + def max_length(self) -> int: # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token return 2048 @property - def max_gen_toks(self): + def max_gen_toks(self) -> int: return 256 @property @@ -152,7 +152,7 @@ class OpenaiCompletionsLM(LM): return self._loglikelihood_tokens(new_reqs) def _loglikelihood_tokens( - self, requests, disable_tqdm=False + self, requests, disable_tqdm: bool = False ) -> List[Tuple[float, bool]]: res = [] diff --git a/lm_eval/models/textsynth.py b/lm_eval/models/textsynth.py index 6ce0d475..a8fcfb9c 100644 --- a/lm_eval/models/textsynth.py +++ b/lm_eval/models/textsynth.py @@ -41,7 +41,7 @@ def textsynth_completion(**kwargs): @register_model("textsynth") class TextSynthLM(LM): - def __init__(self, engine, truncate=False): + def __init__(self, engine, truncate: bool = False) -> None: """ :param engine: str TextSynth API engine (e.g. `gptj_6B`) @@ -62,12 +62,12 @@ class TextSynthLM(LM): raise NotImplementedError() @property - def max_length(self): + def max_length(self) -> int: # NOTE: Turn on truncation to avoid errors on long inputs. return 2048 @property - def max_gen_toks(self): + def max_gen_toks(self) -> int: return 256 @property diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index 1669e9b0..e8ba6b42 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -5,7 +5,7 @@ from lm_eval.logger import eval_logger # Stores prompts in a dictionary indexed by 2 levels: # prompt category name, and prompt name. 
# This allows us to access prompts -PROMPT_REGISTRY = { +PROMPT_REGISTRY: dict[str, dict[str, str]] = { "qa-basic": { "question-newline-answer": "Question: {{question}}\nAnswer:", "q-newline-a": "Q: {{question}}\nA:", @@ -13,7 +13,7 @@ PROMPT_REGISTRY = { } -def get_prompt(prompt_id: str, dataset_name=None, subset_name=None): +def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None): # unpack prompt name category_name, prompt_name = prompt_id.split(":") if subset_name is None: diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 3fed6a09..6828b233 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -15,7 +15,7 @@ from lm_eval.api.registry import ( ) -def register_configurable_task(config): +def register_configurable_task(config: dict[str, str]) -> int: SubClass = type( config["task"] + "ConfigurableTask", (ConfigurableTask,), @@ -38,7 +38,7 @@ def register_configurable_task(config): return 0 -def check_prompt_config(config): +def check_prompt_config(config: dict[str, str]) -> List[dict[str, str]]: all_configs = [] if "use_prompt" in config: prompt_list = prompts.load_prompt_list( @@ -69,14 +69,14 @@ def check_prompt_config(config): return all_configs -def get_task_name_from_config(task_config): +def get_task_name_from_config(task_config: dict[str, str]) -> str: if "dataset_name" in task_config: return "{dataset_path}_{dataset_name}".format(**task_config) else: return "{dataset_path}".format(**task_config) -def include_task_folder(task_dir): +def include_task_folder(task_dir: str) -> None: """ Calling this function """ diff --git a/lm_eval/tasks/glue/mnli/utils.py b/lm_eval/tasks/glue/mnli/utils.py index 84d86a9b..2d5fdaec 100644 --- a/lm_eval/tasks/glue/mnli/utils.py +++ b/lm_eval/tasks/glue/mnli/utils.py @@ -1,4 +1,4 @@ -def doc_to_text(doc): +def doc_to_text(doc) -> str: return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format( doc["premise"], doc["hypothesis"].strip() diff --git a/lm_eval/tasks/hendrycks_ethics/utils.py b/lm_eval/tasks/hendrycks_ethics/utils.py index 09e7d6ba..1ff0daa9 100644 --- a/lm_eval/tasks/hendrycks_ethics/utils.py +++ b/lm_eval/tasks/hendrycks_ethics/utils.py @@ -15,7 +15,7 @@ def _preproc_doc(doc): return doc -def doc_to_text(doc): +def doc_to_text(doc) -> str: doc = _preproc_doc(doc) return f"Scenario 1: {doc['scenarios'][0]}\nScenario 2: {doc['scenarios'][1]}\nQuestion: Is Scenario 1 preferable?\nAnswer:" diff --git a/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py b/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py index fc89921d..095e58b2 100644 --- a/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py +++ b/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py @@ -1,11 +1,11 @@ -def doc_to_text(doc): +def doc_to_text(doc) -> str: ctxs = "\n".join(doc["context"]["contexts"]) return "Abstract: {}\nQuestion: {}\nAnswer:".format( ctxs, doc["question"], doc["final_decision"] ) -def doc_to_target(doc): +def doc_to_target(doc) -> str: return " {}".format(doc["final_decision"]) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index d96e9586..c63e8daf 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -10,7 +10,7 @@ import collections import importlib.util import fnmatch -from typing import List, Literal, Union +from typing import Iterator, List, Literal, Union import gc import torch @@ -65,7 +65,7 @@ def join_iters(iters): yield from iter -def chunks(iter, n=0, fn=None): +def chunks(iter, n: int = 0, fn=None): arr = [] for i, x in enumerate(iter): arr.append(x) @@ -87,11 +87,11 @@ def group(arr, fn): 
class MultiChoice: - def __init__(self, choices): + def __init__(self, choices) -> None: self.choices = choices # Simple wildcard support (linux filename patterns) - def __contains__(self, values): + def __contains__(self, values) -> bool: for value in values.split(","): if len(fnmatch.filter(self.choices, value)) == 0: eval_logger.info(f"Available tasks to choose:") @@ -100,7 +100,7 @@ class MultiChoice: raise ValueError("'{}' is not in task list".format(value)) return True - def __iter__(self): + def __iter__(self) -> Iterator: for choice in self.choices: yield choice @@ -108,7 +108,6 @@ class MultiChoice: # Returns a list containing all values of the source_list that # match at least one of the patterns def pattern_match(patterns, source_list): - if type(patterns) == str: patterns = [patterns] @@ -177,7 +176,7 @@ def make_disjoint_window(pair): class Reorderer: - def __init__(self, arr, fn): + def __init__(self, arr, fn) -> None: self.size = len(arr) arr = list(enumerate(arr)) arr = group(arr, lambda x: fn(x[1])) @@ -212,7 +211,7 @@ class Grouper: objects in `arr` satisfying `key == fn(ob)`. """ - def __init__(self, arr, fn): + def __init__(self, arr, fn) -> None: # self.orig_arr = arr self.size = len(arr) arr = list(enumerate(arr)) @@ -263,7 +262,7 @@ class Grouper: return res -def make_table(result_dict, column="results"): +def make_table(result_dict, column: str = "results"): """Generate table of results.""" from pytablewriter import MarkdownTableWriter, LatexTableWriter @@ -393,7 +392,6 @@ def get_git_commit_hash(): def import_function(loader, node): - function_name = loader.construct_scalar(node) yaml_path = os.path.dirname(loader.name) @@ -428,7 +426,6 @@ def load_yaml_config(yaml_path): include_path.reverse() final_yaml_config = {} for path in include_path: - # Assumes that path is a full path. # If not found, assume the included yaml # is in the same dir as the original yaml @@ -447,7 +444,7 @@ def load_yaml_config(yaml_path): return yaml_config -def regex_replace(string, pattern, repl, count=0): +def regex_replace(string, pattern, repl, count: int = 0): """Implements the `re.sub` function as a custom Jinja filter.""" return re.sub(pattern, repl, string, count=count) @@ -521,7 +518,7 @@ def pad_and_concat( return torch.cat(tensors, dim=0) -def clear_torch_cache(): +def clear_torch_cache() -> None: gc.collect() torch.cuda.empty_cache() @@ -546,7 +543,7 @@ class MultiTokenEOSCriteria(transformers.StoppingCriteria): tokenizer: transformers.PreTrainedTokenizer, initial_decoder_input_length: int, batch_size: int, - ): + ) -> None: self.initial_decoder_input_length = initial_decoder_input_length self.done_tracker = [False] * batch_size self.sequence = sequence diff --git a/main.py b/main.py index 169ef466..0a1f66bf 100644 --- a/main.py +++ b/main.py @@ -15,7 +15,7 @@ from lm_eval.tasks import include_task_folder os.environ["TOKENIZERS_PARALLELISM"] = "false" -def parse_args(): +def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument("--model", required=True, help="Name of model e.g. 
`hf`") parser.add_argument( @@ -98,7 +98,7 @@ def parse_args(): return parser.parse_args() -def main(): +def main() -> None: args = parse_args() if args.limit: -- GitLab From 8ba30fa090d6b8ff3f1c854e9048dca7a6f6501f Mon Sep 17 00:00:00 2001 From: Ethan Smith Date: Fri, 25 Aug 2023 15:03:38 -0700 Subject: [PATCH 054/264] Add trailing whitespace to mypy.ini --- mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mypy.ini b/mypy.ini index 99a101c3..e6c70b0d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -26,4 +26,4 @@ ignore_errors = True ignore_errors = True [mypy-main] -ignore_errors = True \ No newline at end of file +ignore_errors = True -- GitLab From cd460fc5ab1b23c7bfd39b3aff397668b1585b02 Mon Sep 17 00:00:00 2001 From: Ethan Smith Date: Fri, 25 Aug 2023 15:06:26 -0700 Subject: [PATCH 055/264] Add pyproject.toml --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..2f210119 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=40.8.0", "wheel"] +build-backend = "setuptools.build_meta" -- GitLab From a551c7895867a45c1429f26f31370bb54c859e24 Mon Sep 17 00:00:00 2001 From: Ethan Smith Date: Fri, 25 Aug 2023 18:03:54 -0700 Subject: [PATCH 056/264] Fix lint failure --- lm_eval/decontamination/janitor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lm_eval/decontamination/janitor.py b/lm_eval/decontamination/janitor.py index 76bd39d4..209fa8ef 100644 --- a/lm_eval/decontamination/janitor.py +++ b/lm_eval/decontamination/janitor.py @@ -18,6 +18,7 @@ except Exception: T = TypeVar("T") + # Implementation from nltk source # https://www.nltk.org/_modules/nltk/util.html def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[tuple[T, ...]]: @@ -166,7 +167,9 @@ class Janitor: print("WARNING: Janitor running in python mode") return self.clean_python(dirty_string) - def _split_chunks(self, dirty_string: str, dirty_parts: Sequence[tuple]) -> list[str]: + def _split_chunks( + self, dirty_string: str, dirty_parts: Sequence[tuple] + ) -> list[str]: clean_chunks = [] splice_idx = 0 end = -1 -- GitLab From 04f5697d647e193cb13cfe6b9f07f5661dd89ea8 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Sat, 26 Aug 2023 08:48:36 +0000 Subject: [PATCH 057/264] add to readme --- lm_eval/tasks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index eec57638..7d6861db 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -29,7 +29,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [x] HeadQA - [x] MathQA - [x] WebQs -- [ ] WSC273 (Lintang) +- [x] WSC273 - [x] Winogrande - [x] ANLI - [x] Hendrycks Ethics (missing some tasks/metrics, see PR 660: for more info) -- GitLab From 92561822662e383b41a8400e8439687dc208de3e Mon Sep 17 00:00:00 2001 From: Benjamin Fattori Date: Sat, 26 Aug 2023 15:31:31 +0100 Subject: [PATCH 058/264] fix FSDP error with .prepare_model() --- lm_eval/models/huggingface.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 7fd67a8e..2611c3c1 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -20,7 +20,7 @@ from lm_eval.api.registry import register_model from lm_eval.utils import MultiTokenEOSCriteria, stop_sequences_criteria -from accelerate 
import Accelerator, find_executable_batch_size
+from accelerate import Accelerator, find_executable_batch_size, DistributedType

 from typing import List, Optional, Union

@@ -289,9 +289,15 @@ class HFLM(LM):
                     "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes`. If the desired GPU is being used, this message is safe to ignore."
                 )
             else:
-                self._model = accelerator.prepare_model(
-                    self.model, evaluation_mode=True
-                )
+                assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU], "Unsupported distributed type provided. Only DDP and FSDP are supported."
+                if accelerator.distributed_type == DistributedType.FSDP:
+                    self._model = accelerator.prepare(
+                        self.model
+                    )
+                else:
+                    self._model = accelerator.prepare_model(
+                        self.model, evaluation_mode = True
+                    )
                 self._device = torch.device(f"cuda:{accelerator.local_process_index}")
                 self.accelerator = accelerator
-- 
GitLab


From 7c30839b12d4efeae2520a98cf7eb5da4571f627 Mon Sep 17 00:00:00 2001
From: Benjamin Fattori
Date: Sat, 26 Aug 2023 15:31:58 +0100
Subject: [PATCH 059/264] bump minimum accelerate version

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index cf8843ad..d8f4da36 100644
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@ setuptools.setup(
     ],
     python_requires=">=3.9",
     install_requires=[
-        "accelerate>=0.18.0",
+        "accelerate>=0.21.0",
         "evaluate",
         "datasets>=2.0.0",
         "evaluate>=0.4.0",
-- 
GitLab


From 573c2bf91b6e9ab18574f4c4eb3d521c246c317b Mon Sep 17 00:00:00 2001
From: Benjamin Fattori
Date: Sat, 26 Aug 2023 15:42:45 +0100
Subject: [PATCH 060/264] update README with accelerate FSDP fixes

---
 README.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 38c3feaa..cfd0c75d 100644
--- a/README.md
+++ b/README.md
@@ -116,8 +116,10 @@ accelerate launch main.py \
 This will perform *data-parallel evaluation*: that is, placing a **single full copy** of your model onto each available GPU and *splitting batches across GPUs* to evaluate on K GPUs K times faster than on one.

-However, if your model *is too large to be run on a single one of your GPUs*, then we provide an alternative method to run these large models: use of the `parallelize` argument.
+If your model *is too large to be run on a single one of your GPUs* then you can use `accelerate` with Fully Sharded Data Parallel (FSDP) that splits the weights of the model across your data parallel ranks. To enable this, ensure you select `YES` when asked `Do you want to use FullyShardedDataParallel?` when running `accelerate config`. To enable memory-efficient loading, select `YES` when asked `Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start?`. This will ensure only the rank 0 process loads the model and then broadcasts the parameters to the other ranks instead of having each rank load all parameters which can lead to large RAM usage spikes around the start of the script that may cause errors.
+
+We also provide a second method to run these large models: use of the `parallelize` argument.

 ```
 python main.py \
     --model hf \
@@ -132,7 +134,7 @@ To pass even more advanced keyword arguments to `accelerate`, we allow for the f
 - `max_cpu_memory`: the max amount of CPU memory to use when offloading the model weights to RAM.
 - `offload_folder`: a folder where model weights will be offloaded to disk if needed.
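As a rough illustration of the `parallelize` path, the same evaluation can also be driven from the Python API. A sketch, assuming the `simple_evaluate` entry point touched elsewhere in this patch series accepts a registered model-type string; the checkpoint and task list are placeholders:

```python
# Hypothetical usage sketch, not part of the patch: naive model parallelism
# via the `hf` model's `parallelize` flag, invoked from Python.
import lm_eval.evaluator

results = lm_eval.evaluator.simple_evaluate(
    model="hf",  # model type registered by this repo
    model_args="pretrained=EleutherAI/pythia-1.4b,parallelize=True",  # placeholder checkpoint
    tasks=["lambada_openai"],  # placeholder task list
)
print(results["results"])
```

Note this path must be launched with plain `python`, matching the bolded note that follows about not combining `parallelize` with `accelerate launch`.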
-Using this setting helps for massive models like BLOOM which require, or to avoid exceeding your total system RAM (by default, with `accelerate launch` one copy of the model for each GPU is initialized in RAM before moving it to GPU, resulting in large RAM usage spikes around the start of the script that may cause errors such as `Killed`.) However, it naively splits models across GPUs, resulting in only a single GPU performing work at any point in time, and so is much slower than launching with `accelerate launch`, possibly by a factor of the total # of GPUs. +Note that this method naively splits models across GPUs, resulting in only a single GPU performing work at any point in time, and so is much slower than launching with `accelerate launch`, possibly by a factor of the total # of GPUs. **Note that this option requires launching evaluation via `python main.py` rather than `accelerate launch main.py`.** -- GitLab From 352e83161d36e501d64abb0b893ec6452f11497c Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Sat, 26 Aug 2023 17:54:00 +0000 Subject: [PATCH 061/264] add mention of interface.md here --- docs/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/README.md b/docs/README.md index 20dc9b08..79280e9e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,6 +4,7 @@ Welcome to the docs for the LM Evaluation Harness! ## Table of Contents +* To learn about the public interface of the library, as well as how to evaluate via the commandline or as integrated into an external library, see the [Interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/interface.md) * To learn how to add a new library, API, or model type to the library, as well as a quick explainer on the types of ways to evaluate an LM, see the [Model Guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/model_guide.md). * For a crash course on adding new tasks to the library, see our [New Task Guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/new_task_guide.md). * To learn more about pushing the limits of task configuration that the Eval Harness supports, see the [Advanced Task Guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/advanced_task_guide.md). -- GitLab From e55da20fc235306395f3962f5a2ca23a8c909ed3 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Sat, 26 Aug 2023 17:54:22 +0000 Subject: [PATCH 062/264] add mention of interface.md here --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 79280e9e..633018d3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,7 @@ Welcome to the docs for the LM Evaluation Harness! ## Table of Contents -* To learn about the public interface of the library, as well as how to evaluate via the commandline or as integrated into an external library, see the [Interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/interface.md) +* To learn about the public interface of the library, as well as how to evaluate via the commandline or as integrated into an external library, see the [Interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/user_guide.md) * To learn how to add a new library, API, or model type to the library, as well as a quick explainer on the types of ways to evaluate an LM, see the [Model Guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/model_guide.md). 
* For a crash course on adding new tasks to the library, see our [New Task Guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/new_task_guide.md).
* To learn more about pushing the limits of task configuration that the Eval Harness supports, see the [Advanced Task Guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/advanced_task_guide.md).
-- 
GitLab


From dd5b526b59b438980ba348b7f1330883b64bae37 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Mon, 28 Aug 2023 11:25:32 +0000
Subject: [PATCH 063/264] format

---
 lm_eval/tasks/wsc273/utils.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lm_eval/tasks/wsc273/utils.py b/lm_eval/tasks/wsc273/utils.py
index 6e47dc97..97a0c529 100644
--- a/lm_eval/tasks/wsc273/utils.py
+++ b/lm_eval/tasks/wsc273/utils.py
@@ -1,4 +1,3 @@
-
 upper_pronouns = [
     "A",
     "An",
@@ -13,17 +12,18 @@ upper_pronouns = [
     "Their",
 ]

-def process_doc(dataset):

+def process_doc(dataset):
     def process_fn(doc):
         # The HF implementation of `wsc273` is not `partial evaluation` friendly.
         doc["text"] = doc["text"].replace("  ", " ")
         doc["options"][0] = __normalize_option(doc, doc["options"][0])
         doc["options"][1] = __normalize_option(doc, doc["options"][1])
         return doc
-
+
     return dataset.map(process_fn)
+
 def __normalize_option(doc, option):
     # Append `'s` to possessive determiner based options.
@@ -33,4 +33,4 @@ def __normalize_option(doc, option):
     start_of_sentence = doc["text"][doc["pronoun_loc"] - 2] == "."
     if not start_of_sentence and pronoun in upper_pronouns:
         return option.replace(pronoun, pronoun.lower())
-    return option
\ No newline at end of file
+    return option
-- 
GitLab


From 8817b561d8a1dc0dec65b7183380e25ec4c3a412 Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 29 Aug 2023 00:03:57 +0700
Subject: [PATCH 064/264] Update default.yaml

---
 lm_eval/tasks/asdiv/default.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lm_eval/tasks/asdiv/default.yaml b/lm_eval/tasks/asdiv/default.yaml
index 3119daf1..ecce8cbd 100644
--- a/lm_eval/tasks/asdiv/default.yaml
+++ b/lm_eval/tasks/asdiv/default.yaml
@@ -1,5 +1,5 @@
 task: asdiv
-dataset_path: lm_eval.datasets.asdiv.asdiv
+dataset_path: EleutherAI/asdiv
 output_type: loglikelihood
 validation_split: validation
 doc_to_text: "{{body}}\nQuestion:{{question}}\nAnswer:"
-- 
GitLab


From 94011b10b3ffba46b1d9aa5004e69c08f83ad736 Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 29 Aug 2023 00:04:32 +0700
Subject: [PATCH 065/264] Delete README.md

---
 lm_eval/datasets/README.md | 8 --------
 1 file changed, 8 deletions(-)
 delete mode 100644 lm_eval/datasets/README.md

diff --git a/lm_eval/datasets/README.md b/lm_eval/datasets/README.md
deleted file mode 100644
index 01a02635..00000000
--- a/lm_eval/datasets/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# datasets
-
-This directory contains custom HuggingFace [dataset loading scripts](https://huggingface.co/docs/datasets/dataset_script). They are provided to maintain backward compatibility with the ad-hoc data downloaders in earlier versions of the `lm-evaluation-harness` before HuggingFace [`datasets`](https://huggingface.co/docs/datasets/index) was adopted as the default downloading manager. For example, some instances in the HuggingFace `datasets` repository process features (e.g. whitespace stripping, lower-casing, etc.) in ways that the `lm-evaluation-harness` did not.
- -__NOTE__: We are __not__ accepting any additional loading scripts into the main branch! If you'd like to use a custom dataset, fork the repo and follow HuggingFace's loading script guide found [here](https://huggingface.co/docs/datasets/dataset_script). You can then override your `Task`'s `DATASET_PATH` attribute to point to this script's local path. - - -__WARNING__: A handful of loading scripts are included in this collection because they have not yet been pushed to the Huggingface Hub or a HuggingFace organization repo. We will remove such scripts once pushed. -- GitLab From ddfcb1fd47daf45038df0d3c75962b78737fb3ff Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 29 Aug 2023 00:04:47 +0700 Subject: [PATCH 066/264] Delete __init__.py --- lm_eval/datasets/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 lm_eval/datasets/__init__.py diff --git a/lm_eval/datasets/__init__.py b/lm_eval/datasets/__init__.py deleted file mode 100644 index e69de29b..00000000 -- GitLab From 79b6434b920620ca8e2e42e74550ef3e8d40096e Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 29 Aug 2023 00:05:05 +0700 Subject: [PATCH 067/264] Delete __init__.py --- lm_eval/datasets/asdiv/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 lm_eval/datasets/asdiv/__init__.py diff --git a/lm_eval/datasets/asdiv/__init__.py b/lm_eval/datasets/asdiv/__init__.py deleted file mode 100644 index e69de29b..00000000 -- GitLab From 1b3330f3e0a4d1f5edde31114b59c9e4418f8bd2 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 29 Aug 2023 00:05:13 +0700 Subject: [PATCH 068/264] Delete asdiv.py --- lm_eval/datasets/asdiv/asdiv.py | 111 -------------------------------- 1 file changed, 111 deletions(-) delete mode 100644 lm_eval/datasets/asdiv/asdiv.py diff --git a/lm_eval/datasets/asdiv/asdiv.py b/lm_eval/datasets/asdiv/asdiv.py deleted file mode 100644 index 927de50c..00000000 --- a/lm_eval/datasets/asdiv/asdiv.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""ASDIV dataset.""" - - -import os -import xml.etree.ElementTree as ET - -import datasets - - -_CITATION = """\ -@misc{miao2021diverse, - title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers}, - author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su}, - year={2021}, - eprint={2106.15772}, - archivePrefix={arXiv}, - primaryClass={cs.AI} -} -""" - -_DESCRIPTION = """\ -ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language -patterns and problem types) English math word problem (MWP) corpus for evaluating -the capability of various MWP solvers. Existing MWP corpora for studying AI progress -remain limited either in language usage patterns or in problem types. We thus present -a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem -types taught in elementary school. 
Each MWP is annotated with its problem type and grade -level (for indicating the level of difficulty). -""" - -_HOMEPAGE = "https://github.com/chaochun/nlu-asdiv-dataset" - -# TODO: Add the licence for the dataset here if you can find it -_LICENSE = "" - -_URLS = "https://github.com/chaochun/nlu-asdiv-dataset/archive/55790e5270bb91ccfa5053194b25732534696b50.zip" - - -class ASDiv(datasets.GeneratorBasedBuilder): - """ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers""" - - VERSION = datasets.Version("0.0.1") - - BUILDER_CONFIGS = [ - datasets.BuilderConfig( - name="asdiv", - version=VERSION, - description="A diverse corpus for evaluating and developing english math word problem solvers", - ) - ] - - def _info(self): - features = datasets.Features( - { - "body": datasets.Value("string"), - "question": datasets.Value("string"), - "solution_type": datasets.Value("string"), - "answer": datasets.Value("string"), - "formula": datasets.Value("string"), - } - ) - return datasets.DatasetInfo( - description=_DESCRIPTION, - features=features, - homepage=_HOMEPAGE, - license=_LICENSE, - citation=_CITATION, - ) - - def _split_generators(self, dl_manager): - urls = _URLS - data_dir = dl_manager.download_and_extract(urls) - base_filepath = "nlu-asdiv-dataset-55790e5270bb91ccfa5053194b25732534696b50" - return [ - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - # These kwargs will be passed to _generate_examples - gen_kwargs={ - "filepath": os.path.join( - data_dir, base_filepath, "dataset", "ASDiv.xml" - ), - "split": datasets.Split.VALIDATION, - }, - ), - ] - - # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` - def _generate_examples(self, filepath, split): - tree = ET.parse(filepath) - root = tree.getroot() - for key, problem in enumerate(root.iter("Problem")): - yield key, { - "body": problem.find("Body").text, - "question": problem.find("Question").text, - "solution_type": problem.find("Solution-Type").text, - "answer": problem.find("Answer").text, - "formula": problem.find("Formula").text, - } -- GitLab From 25a667f5330247fec17c5c19bc0d76eee3b20f5f Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 29 Aug 2023 00:05:22 +0700 Subject: [PATCH 069/264] Delete dataset_infos.json --- lm_eval/datasets/asdiv/dataset_infos.json | 1 - 1 file changed, 1 deletion(-) delete mode 100644 lm_eval/datasets/asdiv/dataset_infos.json diff --git a/lm_eval/datasets/asdiv/dataset_infos.json b/lm_eval/datasets/asdiv/dataset_infos.json deleted file mode 100644 index cfeea0d3..00000000 --- a/lm_eval/datasets/asdiv/dataset_infos.json +++ /dev/null @@ -1 +0,0 @@ -{"asdiv": {"description": "ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language\npatterns and problem types) English math word problem (MWP) corpus for evaluating\nthe capability of various MWP solvers. Existing MWP corpora for studying AI progress\nremain limited either in language usage patterns or in problem types. We thus present\na new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem\ntypes taught in elementary school. 
Each MWP is annotated with its problem type and grade\nlevel (for indicating the level of difficulty).\n", "citation": "@misc{miao2021diverse,\n title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers},\n author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su},\n year={2021},\n eprint={2106.15772},\n archivePrefix={arXiv},\n primaryClass={cs.AI}\n}\n", "homepage": "https://github.com/chaochun/nlu-asdiv-dataset", "license": "", "features": {"body": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "solution_type": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "formula": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "as_div", "config_name": "asdiv", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 501489, "num_examples": 2305, "dataset_name": "as_div"}}, "download_checksums": {"https://github.com/chaochun/nlu-asdiv-dataset/archive/55790e5270bb91ccfa5053194b25732534696b50.zip": {"num_bytes": 440966, "checksum": "8f1fe4f6d5f170ec1e24ab78c244153c14c568b1bb2b1dad0324e71f37939a2d"}}, "download_size": 440966, "post_processing_size": null, "dataset_size": 501489, "size_in_bytes": 942455}} -- GitLab From 5172d176a26524201126a8ef38dbc459471fc743 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 29 Aug 2023 04:35:50 +0000 Subject: [PATCH 070/264] update to format --- lm_eval/api/task.py | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 195493b4..b55e359d 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1044,20 +1044,14 @@ class ConfigurableTask(Task): else: gold = str(gold) -<<<<<<< HEAD - for metric in self._metric_fn_list.keys(): - result = results[0] -======= result = results[0] for metric in self._metric_fn_list.keys(): ->>>>>>> 4cda3a1c476fce0d721d12da049ab9758b780650 if self.multiple_target: # in the case where we have multiple targets, # return true if any are true # TODO: this may break for multipLe_target, non zero-or-1 metrics scores = [] for gold_option in gold: -<<<<<<< HEAD try: result_score = self._metric_fn_list[metric]( references=[gold_option], @@ -1072,23 +1066,11 @@ class ConfigurableTask(Task): # TODO: this handles the case where HF evaluate returns a dict. result_score = result_score[metric] scores.append(result_score) -======= - res = self._metric_fn_list[metric]( - references=[gold_option], - predictions=[result], - **self._metric_fn_kwargs[metric], - ) - if isinstance(res, dict): - # TODO: this handles the case where HF evaluate returns a dict. 
- res = res[metric] - scores.append(res) ->>>>>>> 4cda3a1c476fce0d721d12da049ab9758b780650 if any(scores): result_score = 1.0 else: result_score = 0.0 else: -<<<<<<< HEAD try: result_score = self._metric_fn_list[metric]( references=[gold], @@ -1097,21 +1079,10 @@ class ConfigurableTask(Task): ) except TypeError: # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics result_score = self._metric_fn_list[metric]([gold, result]) - if isinstance(result_score, dict): - result_dict.update(result_score) - else: - result_dict[metric] = result_score -======= - result_score = self._metric_fn_list[metric]( - references=[gold], - predictions=[result], - **self._metric_fn_kwargs[metric], - ) if isinstance(result_score, dict): # TODO: this handles the case where HF evaluate returns a dict. result_score = result_score[metric] result_dict[metric] = result_score ->>>>>>> 4cda3a1c476fce0d721d12da049ab9758b780650 else: raise ValueError( f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ", -- GitLab From d1c3cb3d0c6530d6a4a4eea3e5ddbe470557796e Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 29 Aug 2023 15:38:10 +0000 Subject: [PATCH 071/264] expanded benchmark to allow new source of prompt templates --- lm_eval/benchmarks/__init__.py | 15 +++- lm_eval/benchmarks/flan/cot_template_yaml | 21 ++++++ lm_eval/benchmarks/flan/cot_yaml | 37 ++++++++++ lm_eval/benchmarks/flan/flan_boolq.yaml | 32 +++++++++ lm_eval/benchmarks/flan/flan_held_in.yaml | 37 ++++++++++ lm_eval/benchmarks/flan/held_in_template_yaml | 13 ++++ lm_eval/prompts/__init__.py | 51 ++++++++++++-- lm_eval/utils.py | 70 ++++++++++--------- main.py | 1 + 9 files changed, 236 insertions(+), 41 deletions(-) create mode 100644 lm_eval/benchmarks/flan/cot_template_yaml create mode 100644 lm_eval/benchmarks/flan/cot_yaml create mode 100644 lm_eval/benchmarks/flan/flan_boolq.yaml create mode 100644 lm_eval/benchmarks/flan/flan_held_in.yaml create mode 100644 lm_eval/benchmarks/flan/held_in_template_yaml diff --git a/lm_eval/benchmarks/__init__.py b/lm_eval/benchmarks/__init__.py index 4924691d..e9ba7477 100644 --- a/lm_eval/benchmarks/__init__.py +++ b/lm_eval/benchmarks/__init__.py @@ -14,7 +14,7 @@ from lm_eval.api.registry import ( def include_benchmarks(task_dir): for root, subdirs, file_list in os.walk(task_dir): - if (subdirs == [] or subdirs == ["__pycache__"]) and (len(file_list) > 0): + if (subdirs == [] or "__pycache__" in subdirs) and (len(file_list) > 0): for f in file_list: if f.endswith(".yaml"): try: @@ -23,6 +23,9 @@ def include_benchmarks(task_dir): with open(benchmark_path, "rb") as file: yaml_config = yaml.full_load(file) + if "prompts" in yaml_config: + continue # Skip it + assert "group" in yaml_config group = yaml_config["group"] all_task_list = yaml_config["task"] @@ -34,6 +37,16 @@ def include_benchmarks(task_dir): ] for task_config in config_list: + yaml_dir = os.path.dirname(benchmark_path) + task_config = utils.load_yaml_config( + yaml_config=task_config, yaml_dir=yaml_dir + ) + if "use_prompt" in task_config: + if "yaml" in task_config["use_prompt"]: + task_config["use_prompt"] = os.path.join( + root, task_config["use_prompt"] + ) + var_configs = check_prompt_config( { **task_config, diff --git a/lm_eval/benchmarks/flan/cot_template_yaml b/lm_eval/benchmarks/flan/cot_template_yaml new file mode 100644 index 00000000..bca422ac --- /dev/null +++ b/lm_eval/benchmarks/flan/cot_template_yaml @@ -0,0 +1,21 @@ +group: zero-shot-cot +output_type: greedy_until 
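+# Answers are generated free-form; the "get-answer" filter at the bottom of
+# this file strips the chain-of-thought and keeps only the final number,
+# which is then scored with exact_match.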
+validation_split: validation +doc_to_target: "{{answer}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "\n\n" + do_sample: false + temperature: 0.0 +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" diff --git a/lm_eval/benchmarks/flan/cot_yaml b/lm_eval/benchmarks/flan/cot_yaml new file mode 100644 index 00000000..75ac4a10 --- /dev/null +++ b/lm_eval/benchmarks/flan/cot_yaml @@ -0,0 +1,37 @@ +group: flan_cot +task: + - include: cot_template_yaml + dataset_path: super_glue + dataset_name: boolq + use_prompt: promptsource:* + validation_split: validation + - include: cot_template_yaml + dataset_path: super_glue + dataset_name: rte + use_prompt: promptsource:* + validation_split: validation + - include: cot_template_yaml + task: anli_r1 + dataset_path: anli + use_prompt: promptsource:* + validation_split: dev_r1 + - include: cot_template_yaml + task: anli_r2 + dataset_path: anli + use_prompt: promptsource:* + validation_split: dev_r2 + - include: cot_template_yaml + task: anli_r3 + dataset_path: anli + use_prompt: promptsource:* + validation_split: dev_r3 + - include: cot_template_yaml + task: ai2_arc + dataset_path: ARC-Easy + use_prompt: promptsource:* + validation_split: validation + - include: cot_template_yaml + task: ai2_arc + dataset_path: ARC-Challange + use_prompt: promptsource:* + validation_split: validation diff --git a/lm_eval/benchmarks/flan/flan_boolq.yaml b/lm_eval/benchmarks/flan/flan_boolq.yaml new file mode 100644 index 00000000..c99292f2 --- /dev/null +++ b/lm_eval/benchmarks/flan/flan_boolq.yaml @@ -0,0 +1,32 @@ +# Flan Prompt Templates +prompts: + "template-0": + doc_to_text: "{{text}}\n\nCan we conclude that {{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-1": + doc_to_text: "{{text}}\n\nIs it true that {{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-2": + doc_to_text: "{{text}}\n\n{{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-3": + doc_to_text: "Text: {{text}}\n\nQuestion: {{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-4": + doc_to_text: "{{text}}\n\nWhat's the best answer to this question: {{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-5": + doc_to_text: "{{text}}\nBased on the above text what's the best answer to this question: {{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-6": + doc_to_text: "{{text}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-7": + doc_to_text: "{{text}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-8": + doc_to_text: "{{title}}\n\n{{text}}\n\nIs this statement correct \"{{question}}\"?\n\n{{options_}}" + doc_to_target: "{{answer}}" + "template-9": + doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{text}}\n\n{{options_}}" + doc_to_target: "{{answer}}" diff --git a/lm_eval/benchmarks/flan/flan_held_in.yaml b/lm_eval/benchmarks/flan/flan_held_in.yaml new file mode 100644 index 00000000..232db0e0 --- /dev/null +++ b/lm_eval/benchmarks/flan/flan_held_in.yaml @@ -0,0 +1,37 @@ +group: flan_held_in +task: + - include: held_in_template_yaml + dataset_path: super_glue + 
dataset_name: boolq + use_prompt: flan_boolq.yaml:* + validation_split: validation + # - include: held_in_template_yaml + # dataset_path: super_glue + # dataset_name: rte + # use_prompt: local:* + # validation_split: validation + # - include: held_in_template_yaml + # task: anli_r1 + # dataset_path: anli + # use_prompt: local:* + # validation_split: dev_r1 + # - include: held_in_template_yaml + # task: anli_r2 + # dataset_path: anli + # use_prompt: local:* + # validation_split: dev_r2 + # - include: held_in_template_yaml + # task: anli_r3 + # dataset_path: anli + # use_prompt: local:* + # validation_split: dev_r3 + # - include: held_in_template_yaml + # task: ai2_arc + # dataset_path: ARC-Easy + # use_prompt: local:* + # validation_split: validation + # - include: held_in_template_yaml + # task: ai2_arc + # dataset_path: ARC-Challange + # use_prompt: local:* + # validation_split: validation diff --git a/lm_eval/benchmarks/flan/held_in_template_yaml b/lm_eval/benchmarks/flan/held_in_template_yaml new file mode 100644 index 00000000..f28774c6 --- /dev/null +++ b/lm_eval/benchmarks/flan/held_in_template_yaml @@ -0,0 +1,13 @@ +output_type: greedy_until +validation_split: validation +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "\n\n" + do_sample: false + temperature: 0.0 diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index 1669e9b0..701119cf 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -44,6 +44,14 @@ def get_prompt(prompt_id: str, dataset_name=None, subset_name=None): raise ValueError( f"{prompt_name} not in prompt list {prompts.all_template_names}" ) + elif ".yaml" in category_name: + import yaml + + with open(category_name, "rb") as file: + prompt_yaml_file = yaml.full_load(file) + + prompt_string = prompt_yaml_file["prompts"][prompt_name] + return PromptString(prompt_string) else: try: return PROMPT_REGISTRY[category_name][prompt_name] @@ -56,13 +64,42 @@ def get_prompt(prompt_id: str, dataset_name=None, subset_name=None): def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwargs): - from promptsource.templates import DatasetTemplates + category_name, prompt_name = use_prompt.split(":") - if subset_name is None: - prompts = DatasetTemplates(dataset_name=dataset_name) - else: - prompts = DatasetTemplates(dataset_name=dataset_name, subset_name=subset_name) + if category_name == "promptsource": + from promptsource.templates import DatasetTemplates + + if subset_name is None: + prompts = DatasetTemplates(dataset_name=dataset_name) + else: + prompts = DatasetTemplates( + dataset_name=dataset_name, subset_name=subset_name + ) + + prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) + + elif ".yaml" in category_name: + import yaml + + with open(category_name, "rb") as file: + prompt_yaml_file = yaml.full_load(file) + + prompt_list = utils.pattern_match( + prompt_name, prompt_yaml_file["prompts"].keys() + ) - category_name, prompt_name = use_prompt.split(":") - prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) return [":".join([category_name, prompt]) for prompt in prompt_list] + + +class PromptString: + def __init__(prompt_string): + self.prompt_string = prompt_string + + def apply(self, doc): + + doc_to_text = self.prompt_string["doc_to_text"] + doc_to_target = self.prompt_string["doc_to_target"] + text_string = utils.apply_template(doc_to_text, doc) + 
target_string = utils.apply_template(doc_to_target, doc) + + return [text_string, target_string] diff --git a/lm_eval/utils.py b/lm_eval/utils.py index d96e9586..7c55d11d 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -412,39 +412,43 @@ def import_function(loader, node): yaml.add_constructor("!function", import_function) -def load_yaml_config(yaml_path): - with open(yaml_path, "rb") as file: - yaml_config = yaml.full_load(file) - yaml_dir = os.path.dirname(yaml_path) - - if "include" in yaml_config: - include_path = yaml_config["include"] - del yaml_config["include"] - - if type(include_path) == str: - include_path = [include_path] - - # Load from the last one first - include_path.reverse() - final_yaml_config = {} - for path in include_path: - - # Assumes that path is a full path. - # If not found, assume the included yaml - # is in the same dir as the original yaml - if not os.path.isfile(path): - path = os.path.join(yaml_dir, path) - - try: - included_yaml_config = load_yaml_config(path) - final_yaml_config.update(included_yaml_config) - except Exception as ex: - # If failed to load, ignore - raise ex - - final_yaml_config.update(yaml_config) - return final_yaml_config - return yaml_config +def load_yaml_config(yaml_path=None, yaml_config=None, yaml_dir=None): + + if yaml_config is None: + with open(yaml_path, "rb") as file: + yaml_config = yaml.full_load(file) + yaml_dir = os.path.dirname(yaml_path) + + assert yaml_dir is not None + + if "include" in yaml_config: + include_path = yaml_config["include"] + del yaml_config["include"] + + if type(include_path) == str: + include_path = [include_path] + + # Load from the last one first + include_path.reverse() + final_yaml_config = {} + for path in include_path: + + # Assumes that path is a full path. 
+            # If not found, assume the included yaml
+            # is in the same dir as the original yaml
+            if not os.path.isfile(path):
+                path = os.path.join(yaml_dir, path)
+
+            try:
+                included_yaml_config = load_yaml_config(path)
+                final_yaml_config.update(included_yaml_config)
+            except Exception as ex:
+                # If failed to load, ignore
+                raise ex
+
+        final_yaml_config.update(yaml_config)
+        return final_yaml_config
+    return yaml_config


 def regex_replace(string, pattern, repl, count=0):
diff --git a/main.py b/main.py
index 169ef466..bb5647ef 100644
--- a/main.py
+++ b/main.py
@@ -11,6 +11,7 @@ from lm_eval import evaluator, utils
 from lm_eval.api.registry import ALL_TASKS
 from lm_eval.logger import eval_logger
 from lm_eval.tasks import include_task_folder
+from lm_eval.benchmarks import include_benchmarks

 os.environ["TOKENIZERS_PARALLELISM"] = "false"

-- GitLab

From 4bdf11e1785bc6b2005153449975d05eaf7fd8b1 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Tue, 29 Aug 2023 16:09:58 +0000
Subject: [PATCH 072/264] updated to work

---
 lm_eval/benchmarks/flan/flan_held_in.yaml     | 12 +++++------
 .../{ => prompt_templates}/flan_boolq.yaml    | 20 +++++++++----------
 lm_eval/prompts/__init__.py                   |  2 +-
 3 files changed, 17 insertions(+), 17 deletions(-)
 rename lm_eval/benchmarks/flan/{ => prompt_templates}/flan_boolq.yaml (60%)

diff --git a/lm_eval/benchmarks/flan/flan_held_in.yaml b/lm_eval/benchmarks/flan/flan_held_in.yaml
index 232db0e0..3ed0fb5d 100644
--- a/lm_eval/benchmarks/flan/flan_held_in.yaml
+++ b/lm_eval/benchmarks/flan/flan_held_in.yaml
@@ -3,13 +3,13 @@ task:
   - include: held_in_template_yaml
     dataset_path: super_glue
     dataset_name: boolq
-    use_prompt: flan_boolq.yaml:*
+    use_prompt: prompt_templates/flan_boolq.yaml:*
+    validation_split: validation
+  - include: held_in_template_yaml
+    dataset_path: super_glue
+    dataset_name: rte
+    use_prompt: prompt_templates/flan_rte.yaml:*
     validation_split: validation
-  # - include: held_in_template_yaml
-  #   dataset_path: super_glue
-  #   dataset_name: rte
-  #   use_prompt: local:*
-  #   validation_split: validation
diff --git a/lm_eval/benchmarks/flan/flan_boolq.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml
similarity index 60%
rename from lm_eval/benchmarks/flan/flan_boolq.yaml
rename to lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml
index c99292f2..ebb7a1f9 100644
--- a/lm_eval/benchmarks/flan/flan_boolq.yaml
+++ b/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml
@@ -1,32 +1,32 @@
 # Flan Prompt Templates
 prompts:
   "template-0":
-    doc_to_text: "{{text}}\n\nCan we conclude that {{question}}?\n\n{{options_}}"
+    doc_to_text: "{{text}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-1":
-    doc_to_text: "{{text}}\n\nIs it true that {{question}}?\n\n{{options_}}"
+    doc_to_text: "{{text}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-2":
-    doc_to_text: "{{text}}\n\n{{question}}?\n\n{{options_}}"
+    doc_to_text: "{{text}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-3":
-    doc_to_text: "Text: {{text}}\n\nQuestion: {{question}}?\n\n{{options_}}"
+    doc_to_text: "Text: {{text}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-4":
-    doc_to_text: "{{text}}\n\nWhat's the best answer to this question: {{question}}?\n\n{{options_}}"
+    doc_to_text: "{{text}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-5":
-    doc_to_text: "{{text}}\nBased on the above text what's the best answer to this question: {{question}}?\n\n{{options_}}"
+    doc_to_text: "{{text}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-6":
-    doc_to_text: "{{text}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\n{{options_}}"
+    doc_to_text: "{{text}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-7":
-    doc_to_text: "{{text}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\n{{options_}}"
+    doc_to_text: "{{text}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-8":
-    doc_to_text: "{{title}}\n\n{{text}}\n\nIs this statement correct \"{{question}}\"?\n\n{{options_}}"
+    doc_to_text: "{{title}}\n\n{{text}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
   "template-9":
-    doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{text}}\n\n{{options_}}"
+    doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{text}}\n\nOPTIONS:\n- no\n- yes"
     doc_to_target: "{{answer}}"
diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py
index 701119cf..1f746607 100644
--- a/lm_eval/prompts/__init__.py
+++ b/lm_eval/prompts/__init__.py
@@ -92,7 +92,7 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwa


 class PromptString:
-    def __init__(prompt_string):
+    def __init__(self, prompt_string):
         self.prompt_string = prompt_string

-- GitLab

From f1620c2d9451c8095067eca24f8f4723d7df9cdd Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 29 Aug 2023 23:23:02 +0700
Subject: [PATCH 073/264] Update main.py

Added
```
from lm_eval.benchmarks import include_benchmarks
```
in order for benchmarks to be callable.
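
For reference, a rough sketch of the intended call pattern (the call site and
directory argument here are assumptions, not taken from this diff;
`include_benchmarks` is the directory walker added in PATCH 071):

```python
import os

import lm_eval.benchmarks
from lm_eval.benchmarks import include_benchmarks

# Walk lm_eval/benchmarks and register every benchmark YAML it contains
# (e.g. the flan_held_in group) so the group name resolves via --tasks.
include_benchmarks(os.path.dirname(lm_eval.benchmarks.__file__))
```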
--- main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/main.py b/main.py index 169ef466..bb5647ef 100644 --- a/main.py +++ b/main.py @@ -11,6 +11,7 @@ from lm_eval import evaluator, utils from lm_eval.api.registry import ALL_TASKS from lm_eval.logger import eval_logger from lm_eval.tasks import include_task_folder +from lm_eval.benchmarks import include_benchmarks os.environ["TOKENIZERS_PARALLELISM"] = "false" -- GitLab From db79d92743784911f18829aae9adaeab186d222d Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 29 Aug 2023 16:34:16 +0000 Subject: [PATCH 074/264] added subtasks --- lm_eval/benchmarks/flan/flan_held_in.yaml | 30 +++++++++---------- .../flan/prompt_templates/flan_anli.yaml | 29 ++++++++++++++++++ .../flan/prompt_templates/flan_rte.yaml | 29 ++++++++++++++++++ 3 files changed, 73 insertions(+), 15 deletions(-) create mode 100644 lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml create mode 100644 lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml diff --git a/lm_eval/benchmarks/flan/flan_held_in.yaml b/lm_eval/benchmarks/flan/flan_held_in.yaml index 3ed0fb5d..3d558747 100644 --- a/lm_eval/benchmarks/flan/flan_held_in.yaml +++ b/lm_eval/benchmarks/flan/flan_held_in.yaml @@ -10,21 +10,21 @@ task: dataset_name: rte use_prompt: prompt_templates/flan_rte.yaml:* validation_split: validation - # - include: held_in_template_yaml - # task: anli_r1 - # dataset_path: anli - # use_prompt: local:* - # validation_split: dev_r1 - # - include: held_in_template_yaml - # task: anli_r2 - # dataset_path: anli - # use_prompt: local:* - # validation_split: dev_r2 - # - include: held_in_template_yaml - # task: anli_r3 - # dataset_path: anli - # use_prompt: local:* - # validation_split: dev_r3 + - include: held_in_template_yaml + task: anli_r1 + dataset_path: anli + use_prompt: prompt_templates/flan_anli.yaml:* + validation_split: dev_r1 + - include: held_in_template_yaml + task: anli_r2 + dataset_path: anli + use_prompt: prompt_templates/flan_anli.yaml:* + validation_split: dev_r2 + - include: held_in_template_yaml + task: anli_r3 + dataset_path: anli + use_prompt: prompt_templates/flan_anli.yaml:* + validation_split: dev_r3 # - include: held_in_template_yaml # task: ai2_arc # dataset_path: ARC-Easy diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml new file mode 100644 index 00000000..206fb00e --- /dev/null +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml @@ -0,0 +1,29 @@ +# Flan Prompt Templates +prompts: + "template-0": + doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nI think the answer is" + doc_to_target: "{{answer}}" + "template-1": + doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{answer}}" + "template-2": + doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{answer}}" + "template-3": + doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{answer}}" + "template-4": + doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nThe answer 
is:" + doc_to_target: "{{answer}}" + "template-5": + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{answer}}" + "template-6": + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{answer}}" + "template-7": + doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{answer}}" + "template-8": + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{answer}}" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml new file mode 100644 index 00000000..79356aed --- /dev/null +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml @@ -0,0 +1,29 @@ +# Flan Prompt Templates +prompts: + "template-0": + doc_to_text: "{{premise}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{answer}}" + "template-1": + doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{answer}}" + "template-2": + doc_to_text: "{{premise}}\n\nQ with options: Can we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{answer}}" + "template-3": + doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{answer}}" + "template-4": + doc_to_text: "{{premise}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{hypothesis}}" + doc_to_target: "{{answer}}" + "template-5": + doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nThe answer is" + doc_to_target: "{{answer}}" + "template-6": + doc_to_text: "Read the text and determine if the sentence is true:\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" + doc_to_target: "{{answer}}" + "template-7": + doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" + doc_to_target: "{{answer}}" + "template-8": + doc_to_text: "Determine if the sentence is true based on the text below. 
Choose from options.\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{answer}}" -- GitLab From 7e875dcf81e051b00500efa12ef93e8800376eef Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 29 Aug 2023 16:52:44 +0000 Subject: [PATCH 075/264] moved templates, edit templates --- .../flan/{cot_yaml => flan_cot_yaml} | 0 lm_eval/benchmarks/flan/flan_held_in.yaml | 37 ----------------- .../flan/prompt_templates/flan_anli.yaml | 36 ++++++++--------- .../flan/prompt_templates/flan_boolq.yaml | 40 +++++++++---------- .../flan/prompt_templates/flan_rte.yaml | 18 ++++----- .../{ => yaml_templates}/cot_template_yaml | 2 +- .../held_in_template_yaml | 0 7 files changed, 48 insertions(+), 85 deletions(-) rename lm_eval/benchmarks/flan/{cot_yaml => flan_cot_yaml} (100%) delete mode 100644 lm_eval/benchmarks/flan/flan_held_in.yaml rename lm_eval/benchmarks/flan/{ => yaml_templates}/cot_template_yaml (95%) rename lm_eval/benchmarks/flan/{ => yaml_templates}/held_in_template_yaml (100%) diff --git a/lm_eval/benchmarks/flan/cot_yaml b/lm_eval/benchmarks/flan/flan_cot_yaml similarity index 100% rename from lm_eval/benchmarks/flan/cot_yaml rename to lm_eval/benchmarks/flan/flan_cot_yaml diff --git a/lm_eval/benchmarks/flan/flan_held_in.yaml b/lm_eval/benchmarks/flan/flan_held_in.yaml deleted file mode 100644 index 3d558747..00000000 --- a/lm_eval/benchmarks/flan/flan_held_in.yaml +++ /dev/null @@ -1,37 +0,0 @@ -group: flan_held_in -task: - - include: held_in_template_yaml - dataset_path: super_glue - dataset_name: boolq - use_prompt: prompt_templates/flan_boolq.yaml:* - validation_split: validation - - include: held_in_template_yaml - dataset_path: super_glue - dataset_name: rte - use_prompt: prompt_templates/flan_rte.yaml:* - validation_split: validation - - include: held_in_template_yaml - task: anli_r1 - dataset_path: anli - use_prompt: prompt_templates/flan_anli.yaml:* - validation_split: dev_r1 - - include: held_in_template_yaml - task: anli_r2 - dataset_path: anli - use_prompt: prompt_templates/flan_anli.yaml:* - validation_split: dev_r2 - - include: held_in_template_yaml - task: anli_r3 - dataset_path: anli - use_prompt: prompt_templates/flan_anli.yaml:* - validation_split: dev_r3 - # - include: held_in_template_yaml - # task: ai2_arc - # dataset_path: ARC-Easy - # use_prompt: local:* - # validation_split: validation - # - include: held_in_template_yaml - # task: ai2_arc - # dataset_path: ARC-Challange - # use_prompt: local:* - # validation_split: validation diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml index 206fb00e..99819344 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml @@ -1,29 +1,29 @@ # Flan Prompt Templates prompts: "template-0": - doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nI think the answer is" - doc_to_target: "{{answer}}" + doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-1": - doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - 
doc_to_target: "{{answer}}" + doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-2": - doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{answer}}" + doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-3": - doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{answer}}" + doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-4": - doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nThe answer is:" - doc_to_target: "{{answer}}" + doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-5": - doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{answer}}" + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-6": - doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{answer}}" + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-7": - doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{answer}}" + doc_to_text: "Can we draw the following hypothesis from the context (see options)? 
\n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" "template-8": - doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{answer}}" + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml index ebb7a1f9..dc6703d5 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml @@ -1,32 +1,32 @@ # Flan Prompt Templates prompts: "template-0": - doc_to_text: "{{text}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-1": - doc_to_text: "{{text}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-2": - doc_to_text: "{{text}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-3": - doc_to_text: "Text: {{text}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-4": - doc_to_text: "{{text}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-5": - doc_to_text: "{{text}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-6": - doc_to_text: "{{text}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-7": - doc_to_text: "{{text}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-8": - doc_to_text: "{{title}}\n\n{{text}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: 
"{{title}}\n\n{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" "template-9": - doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{text}}\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{answer}}" + doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml index 79356aed..616829f0 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml @@ -2,28 +2,28 @@ prompts: "template-0": doc_to_text: "{{premise}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-1": doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-2": doc_to_text: "{{premise}}\n\nQ with options: Can we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-3": doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-4": doc_to_text: "{{premise}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{hypothesis}}" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-5": doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nThe answer is" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-6": doc_to_text: "Read the text and determine if the sentence is true:\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-7": doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" "template-8": doc_to_text: "Determine if the sentence is true based on the text below. 
Choose from options.\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{answer}}" + doc_to_target: "{{[['yes', 'no']][label]}}" diff --git a/lm_eval/benchmarks/flan/cot_template_yaml b/lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml similarity index 95% rename from lm_eval/benchmarks/flan/cot_template_yaml rename to lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml index bca422ac..0cb0d16e 100644 --- a/lm_eval/benchmarks/flan/cot_template_yaml +++ b/lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml @@ -1,4 +1,4 @@ -group: zero-shot-cot +group: flan-cot output_type: greedy_until validation_split: validation doc_to_target: "{{answer}}" diff --git a/lm_eval/benchmarks/flan/held_in_template_yaml b/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml similarity index 100% rename from lm_eval/benchmarks/flan/held_in_template_yaml rename to lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml -- GitLab From 0a5bd86978c6c66b03b87f85b872d7e17232a7f9 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 07:17:52 +0000 Subject: [PATCH 076/264] updated yaml templates --- .../flan/prompt_templates/flan_anli.yaml | 18 +++++++++--------- .../flan/prompt_templates/flan_boolq.yaml | 3 ++- .../flan/prompt_templates/flan_rte.yaml | 18 +++++++++--------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml index 99819344..525e9e0c 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml @@ -2,28 +2,28 @@ prompts: "template-0": doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-1": doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-2": doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-3": doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-4": doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-5": doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + 
doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-6": doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-7": doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-8": doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[['Yes', 'It\'s impossible to say', 'No']][label]}}" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml index dc6703d5..f8c8ebfc 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_boolq.yaml @@ -25,7 +25,8 @@ prompts: doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes" doc_to_target: "{{['no', 'yes'][label]}}" "template-8": - doc_to_text: "{{title}}\n\n{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" + # doc_to_text: "{{title}}\n\n{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" + doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" doc_to_target: "{{['no', 'yes'][label]}}" "template-9": doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml index 616829f0..7893eae4 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_rte.yaml @@ -2,28 +2,28 @@ prompts: "template-0": doc_to_text: "{{premise}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-1": doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-2": doc_to_text: "{{premise}}\n\nQ with options: Can we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-3": doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-4": doc_to_text: "{{premise}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{hypothesis}}" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 
'no'][label]}}" "template-5": doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nThe answer is" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-6": doc_to_text: "Read the text and determine if the sentence is true:\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-7": doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" "template-8": doc_to_text: "Determine if the sentence is true based on the text below. Choose from options.\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{[['yes', 'no']][label]}}" + doc_to_target: "{{['yes', 'no'][label]}}" -- GitLab From c0cb0be7b764e03fc07dbb36345c78381a02c8b0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 07:18:18 +0000 Subject: [PATCH 077/264] moved benchmark up --- lm_eval/benchmarks/flan_held_in.yaml | 37 ++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 lm_eval/benchmarks/flan_held_in.yaml diff --git a/lm_eval/benchmarks/flan_held_in.yaml b/lm_eval/benchmarks/flan_held_in.yaml new file mode 100644 index 00000000..91979f9f --- /dev/null +++ b/lm_eval/benchmarks/flan_held_in.yaml @@ -0,0 +1,37 @@ +group: flan_held_in +task: + - include: flan/yaml_templates/held_in_template_yaml + dataset_path: super_glue + dataset_name: boolq + use_prompt: flan/prompt_templates/flan_boolq.yaml:* + validation_split: validation + - include: flan/yaml_templates//held_in_template_yaml + dataset_path: super_glue + dataset_name: rte + use_prompt: flan/prompt_templates/flan_rte.yaml:* + validation_split: validation + - include: flan/yaml_templates//held_in_template_yaml + task: anli_r1 + dataset_path: anli + use_prompt: flan/prompt_templates/flan_anli.yaml:* + validation_split: dev_r1 + - include: flan/yaml_templates//held_in_template_yaml + task: anli_r2 + dataset_path: anli + use_prompt: flan/prompt_templates/flan_anli.yaml:* + validation_split: dev_r2 + - include: flan/yaml_templates//held_in_template_yaml + task: anli_r3 + dataset_path: anli + use_prompt: flan/prompt_templates/flan_anli.yaml:* + validation_split: dev_r3 + # - include: flan/yaml_templates//held_in_template_yaml + # task: ai2_arc + # dataset_path: ARC-Easy + # use_prompt: local:* + # validation_split: validation + # - include: flan/yaml_templates//held_in_template_yaml + # task: ai2_arc + # dataset_path: ARC-Challange + # use_prompt: local:* + # validation_split: validation -- GitLab From 65e5bc69de7b091a11a29603cb2053a570d53094 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 07:38:10 +0000 Subject: [PATCH 078/264] added checks for if gold is outside choice options --- lm_eval/api/task.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index cae48368..d93628f2 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1014,18 +1014,36 @@ class ConfigurableTask(Task): gold = self.doc_to_text(doc) else: gold = self.doc_to_target(doc) - if type(gold) is str: - gold = choices.index(gold) + + 
gold_index_error = False + if type(gold) is list: + gold = [i if i < len(choices) else -1000 for i in gold] + if -1000 in gold: + gold_index_error = True + else: + if type(gold) is int: + gold = gold if i < len(choice) else -1000 + elif type(gold) is str: + gold = choices.index(gold) if gold in choices else -1000 + + if gold == -1000: + gold_index_error = True + + if gold_index_error: + eval_logger.warning( + f"Label index was not in within range of availabel choices," + f"Sample:\n\n{doc}\n\n" + ) if self.multiple_target: acc = 1.0 if pred in gold else 0.0 acc_norm = 1.0 if pred_norm in gold else 0.0 - exact_match = int(any([is_greedy[i] for i in gold])) + exact_match = int(any([is_greedy[i] if i != -1000 else 0 for i in gold])) else: acc = 1.0 if pred == gold else 0.0 acc_norm = 1.0 if pred_norm == gold else 0.0 # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly - exact_match = int(is_greedy[gold]) + exact_match = int(is_greedy[gold]) if i != -1000 else 0 result_dict = { **({"acc": acc} if "acc" in use_metric else {}), -- GitLab From cddbf9f6db94a4a4600c0eaf733c39024a4f8148 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 07:45:23 +0000 Subject: [PATCH 079/264] reformat --- lm_eval/api/task.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index d93628f2..b91e954d 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1022,28 +1022,30 @@ class ConfigurableTask(Task): gold_index_error = True else: if type(gold) is int: - gold = gold if i < len(choice) else -1000 + gold = gold if gold < len(choices) else -1000 elif type(gold) is str: - gold = choices.index(gold) if gold in choices else -1000 - + gold = choices.index(gold) if gold in choices else -1000 + if gold == -1000: gold_index_error = True if gold_index_error: eval_logger.warning( - f"Label index was not in within range of availabel choices," + f"Label index was not in within range of available choices," f"Sample:\n\n{doc}\n\n" ) if self.multiple_target: acc = 1.0 if pred in gold else 0.0 acc_norm = 1.0 if pred_norm in gold else 0.0 - exact_match = int(any([is_greedy[i] if i != -1000 else 0 for i in gold])) + exact_match = int( + any([is_greedy[i] if i != -1000 else 0 for i in gold]) + ) else: acc = 1.0 if pred == gold else 0.0 acc_norm = 1.0 if pred_norm == gold else 0.0 # TODO: this gets score of 0 on arc_challenge for pythia-70m. 
need to test that this works properly - exact_match = int(is_greedy[gold]) if i != -1000 else 0 + exact_match = int(is_greedy[gold]) if gold != -1000 else 0 result_dict = { **({"acc": acc} if "acc" in use_metric else {}), -- GitLab From f9558ce5ddea0dee5ad0e6e8fc5bc5676765aed3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 10:32:08 +0000 Subject: [PATCH 080/264] add drop --- lm_eval/tasks/drop/README.md | 47 ++++++++++++++++++++++++++++ lm_eval/tasks/drop/default.yaml | 9 ++++++ lm_eval/tasks/drop/utils.py | 54 +++++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+) create mode 100644 lm_eval/tasks/drop/README.md create mode 100644 lm_eval/tasks/drop/default.yaml create mode 100644 lm_eval/tasks/drop/utils.py diff --git a/lm_eval/tasks/drop/README.md b/lm_eval/tasks/drop/README.md new file mode 100644 index 00000000..d31104bd --- /dev/null +++ b/lm_eval/tasks/drop/README.md @@ -0,0 +1,47 @@ +# DROP + +### Paper + +Title: `DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs` + +Abstract: https://aclanthology.org/attachments/N19-1246.Supplementary.pdf + +DROP is a QA dataset which tests comprehensive understanding of paragraphs. In +this crowdsourced, adversarially-created, 96k question-answering benchmark, a +system must resolve multiple references in a question, map them onto a paragraph, +and perform discrete operations over them (such as addition, counting, or sorting). + +Homepage: https://allenai.org/data/drop + +Acknowledgement: This implementation is based on the official evaluation for `DROP`: +https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py + +### Citation + +``` +BibTeX-formatted citation goes here +``` + +### Groups and Tasks + +#### Groups + +* `group_name`: `Short description` + +#### Tasks + +* `task_name`: `1-sentence description of what this particular task does` +* `task_name2`: ... + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
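
### Answer format

As an illustration, a quick sketch of how `utils.parse_answer` normalizes gold
answers into hashable tuples (the records below are made up, and the import
path assumes the task ships as `lm_eval.tasks.drop`):

```python
from lm_eval.tasks.drop.utils import parse_answer

empty_date = {"day": "", "month": "", "year": ""}

# A numeric answer collapses to a one-element tuple of its string form.
assert parse_answer({"number": "3", "spans": [], "date": empty_date}) == ("3",)

# A span answer keeps every annotated span.
assert parse_answer(
    {"number": "", "spans": ["Tom", "Huck"], "date": empty_date}
) == ("Tom", "Huck")
```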
diff --git a/lm_eval/tasks/drop/default.yaml b/lm_eval/tasks/drop/default.yaml new file mode 100644 index 00000000..7e624fe8 --- /dev/null +++ b/lm_eval/tasks/drop/default.yaml @@ -0,0 +1,9 @@ +task: drop +dataset_path: EleutherAI/drop +output_type: greedy_until +training_split: train +validation_split: test +doc_to_text: "Passage: {{passage}}\nQuestion: {{question}}\nAnswer:" +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +should_decontaminate: true +doc_to_decontamination_query: "{{passage}} {{question}}" diff --git a/lm_eval/tasks/drop/utils.py b/lm_eval/tasks/drop/utils.py new file mode 100644 index 00000000..269f1991 --- /dev/null +++ b/lm_eval/tasks/drop/utils.py @@ -0,0 +1,54 @@ + +def process_doc(dataset): + + def _process(doc): + return { + "id": doc["query_id"], + "passage": doc["passage"], + "question": doc["question"], + "answers": get_answers(doc), + } + return dataset.map(_process) + + +def get_answers(doc): + def _flatten_validated_answers(validated_answers): + """Flattens a dict of lists of validated answers. + {"number": ['1', '8'], ...} + -> [{"number": ['1'], ...}, {"number": ['8'], ...}] + """ + valid_answers = [] + for i in range(len(validated_answers["number"])): + valid_answers.append( + { + "number": validated_answers["number"][i], + "date": validated_answers["date"][i], + "spans": validated_answers["spans"][i], + } + ) + return valid_answers + + answers = [] + answers_set = set() + candidates = [doc["answer"]] + _flatten_validated_answers( + doc["validated_answers"] + ) + for candidate in candidates: + answer = parse_answer(candidate) + if answer in answers_set: + continue + answers_set.add(answer) + answers.append(answer) + return answers + +def parse_answer(answer): + # NOTE: Everything is returned as a tuple for uniformity and hashability. + if answer["number"] != "": + return (str(answer["number"]),) + if answer["spans"] != []: + return tuple(answer["spans"]) + return ( + " ".join( + [answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]] + ).strip(), + ) \ No newline at end of file -- GitLab From 64d4600c4717a578e76cbf5236d03e43f17937d4 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 11:19:12 +0000 Subject: [PATCH 081/264] add coqa --- lm_eval/tasks/coqa/README.md | 43 ++++++++++++++++++ lm_eval/tasks/coqa/default.yaml | 20 +++++++++ lm_eval/tasks/coqa/utils.py | 80 +++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) create mode 100644 lm_eval/tasks/coqa/README.md create mode 100644 lm_eval/tasks/coqa/default.yaml create mode 100644 lm_eval/tasks/coqa/utils.py diff --git a/lm_eval/tasks/coqa/README.md b/lm_eval/tasks/coqa/README.md new file mode 100644 index 00000000..77347e4f --- /dev/null +++ b/lm_eval/tasks/coqa/README.md @@ -0,0 +1,43 @@ +# CoQA + +### Paper + +Title: `CoQA: A Conversational Question Answering Challenge` + +Abstract: https://arxiv.org/pdf/1808.07042.pdf + +CoQA is a large-scale dataset for building Conversational Question Answering +systems. The goal of the CoQA challenge is to measure the ability of machines to +understand a text passage and answer a series of interconnected questions that +appear in a conversation. + +Homepage: https://stanfordnlp.github.io/coqa/ + +### Citation + +``` +BibTeX-formatted citation goes here +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `coqa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? 
+ * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/coqa/default.yaml b/lm_eval/tasks/coqa/default.yaml new file mode 100644 index 00000000..5b891192 --- /dev/null +++ b/lm_eval/tasks/coqa/default.yaml @@ -0,0 +1,20 @@ +task: coqa +dataset_path: EleutherAI/coqa +output_type: greedy_until +training_split: train +validation_split: validation +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{story}} {{question.input_text|join('\n')}}" +generation_kwargs: + until: + - "\nQ:" +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/coqa/utils.py b/lm_eval/tasks/coqa/utils.py new file mode 100644 index 00000000..9191e779 --- /dev/null +++ b/lm_eval/tasks/coqa/utils.py @@ -0,0 +1,80 @@ +from itertools import zip_longest + +import transformers.data.metrics.squad_metrics as squad_metrics + +def doc_to_text(doc): + # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1} + # and a question qi, the task is to predict the answer ai + doc_text = doc["story"] + "\n\n" + for (q, a) in zip_longest( + doc["questions"]["input_text"], doc["answers"]["input_text"][:-1] + ): # omit target answer ai + question = f"Q: {q}\n\n" + answer = f"A: {a}\n\n" if a is not None else "A:" + doc_text += question + answer + return doc_text + + +def doc_to_target(doc): + + turn_id = len(doc["questions"]["input_text"]) + # Returns unique answers and valid alternatives (Some questions in CoQA have multiple valid answers). 
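+    # Deduplication below is case-insensitive, so alternative annotations
+    # that differ only in casing collapse into a single gold answer.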
+ answers = [] + answer_forturn = doc["answers"]["input_text"][turn_id - 1] + answers.append(answer_forturn) + + additional_answers = doc.get("additional_answers") + if additional_answers: + for key in additional_answers: + additional_answer_for_turn = additional_answers[key]["input_text"][ + turn_id - 1 + ] + if additional_answer_for_turn.lower() not in map(str.lower, answers): + answers.append(additional_answer_for_turn) + return answers + + +def em(gold_list, pred): + # tests for exact match and on the normalised answer (compute_exact) + em_sum = 0.0 + if len(gold_list) > 1: + for i in range(len(gold_list)): + gold_answers = gold_list[0:i] + gold_list[i + 1 :] + # predictions compared against (n) golds and take maximum + em_sum += max( + squad_metrics.compute_exact(a, pred) for a in gold_answers + ) + else: + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) + + return em_sum / max(1, len(gold_list)) + +def compute_scores(gold_list, pred): + # tests for exact match and on the normalised answer (compute_exact) + # test for overlap (compute_f1) + f1_sum = 0.0 + em_sum = 0.0 + if len(gold_list) > 1: + for i in range(len(gold_list)): + gold_answers = gold_list[0:i] + gold_list[i + 1 :] + # predictions compared against (n) golds and take maximum + em_sum += max( + squad_metrics.compute_exact(a, pred) for a in gold_answers + ) + f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers) + else: + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) + f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list) + + return { + "em": em_sum / max(1, len(gold_list)), + "f1": f1_sum / max(1, len(gold_list)), + } + +def process_results(doc, results): + + gold_list = doc_to_target(doc) + pred = results[0].strip().split("\n")[0] + + scores = compute_scores(gold_list, pred) + return scores -- GitLab From 79aa53b1ec0e6bf2cf161444c509170bf3662d2b Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 11:57:20 +0000 Subject: [PATCH 082/264] running process for drop --- lm_eval/tasks/README.md | 2 +- lm_eval/tasks/drop/README.md | 14 ++- lm_eval/tasks/drop/default.yaml | 19 +++- lm_eval/tasks/drop/utils.py | 160 +++++++++++++++++++++++++++++++- 4 files changed, 182 insertions(+), 13 deletions(-) diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index eec57638..f920705a 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -6,7 +6,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [x] Glue - [x] SuperGlue - [ ] CoQA (Lintang) -- [ ] DROP (Lintang) +- [x] DROP - [x] ~~Lambada~~ - [x] Lambada (Cloze variants) - [x] ~~Lambada (Multilingual)~~ diff --git a/lm_eval/tasks/drop/README.md b/lm_eval/tasks/drop/README.md index d31104bd..6b7fc47b 100644 --- a/lm_eval/tasks/drop/README.md +++ b/lm_eval/tasks/drop/README.md @@ -19,19 +19,25 @@ https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_r ### Citation ``` -BibTeX-formatted citation goes here +@misc{dua2019drop, + title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs}, + author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner}, + year={2019}, + eprint={1903.00161}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} ``` ### Groups and Tasks #### Groups -* `group_name`: `Short description` +* Not part of a group yet. 
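As a concrete illustration of the leave-one-out scheme used by the CoQA `em` and `compute_scores` helpers above: when a turn has several gold answers, each round holds one gold out, scores the prediction against the remaining golds, and keeps the best match, and the per-round maxima are averaged. A minimal sketch, assuming `compute_scores` from `lm_eval/tasks/coqa/utils.py` is importable (the answer strings are invented):

```python
from lm_eval.tasks.coqa.utils import compute_scores

# Hypothetical CoQA turn with three reference answers.
gold_list = ["a dog", "the dog", "dog"]
pred = "a dog"
# Round i drops gold_list[i], scores pred against the remaining golds, and
# keeps the maximum; the per-round maxima are then divided by len(gold_list).
print(compute_scores(gold_list, pred))  # -> {"em": ..., "f1": ...}
```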
#### Tasks -* `task_name`: `1-sentence description of what this particular task does` -* `task_name2`: ... +* `drop` ### Checklist diff --git a/lm_eval/tasks/drop/default.yaml b/lm_eval/tasks/drop/default.yaml index 7e624fe8..973fff7b 100644 --- a/lm_eval/tasks/drop/default.yaml +++ b/lm_eval/tasks/drop/default.yaml @@ -2,8 +2,21 @@ task: drop dataset_path: EleutherAI/drop output_type: greedy_until training_split: train -validation_split: test -doc_to_text: "Passage: {{passage}}\nQuestion: {{question}}\nAnswer:" -doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +validation_split: validation +process_docs: !function utils.process_docs +doc_to_text: "{{passage}} {{question}}" +doc_to_target: "{{ answer|join(',')}}" +target_delimiter: "" +process_results: !function utils.process_results should_decontaminate: true doc_to_decontamination_query: "{{passage}} {{question}}" +generation_kwargs: + until: + - "." +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/drop/utils.py b/lm_eval/tasks/drop/utils.py index 269f1991..1e2888ce 100644 --- a/lm_eval/tasks/drop/utils.py +++ b/lm_eval/tasks/drop/utils.py @@ -1,6 +1,13 @@ +import re +import string -def process_doc(dataset): +import numpy as np +from scipy.optimize import linear_sum_assignment +_ARTICLES = re.compile(r"\b(a|an|the)\b", re.UNICODE) + + +def process_docs(dataset): def _process(doc): return { "id": doc["query_id"], @@ -8,6 +15,7 @@ def process_doc(dataset): "question": doc["question"], "answers": get_answers(doc), } + return dataset.map(_process) @@ -30,9 +38,7 @@ def get_answers(doc): answers = [] answers_set = set() - candidates = [doc["answer"]] + _flatten_validated_answers( - doc["validated_answers"] - ) + candidates = [doc["answer"]] + _flatten_validated_answers(doc["validated_answers"]) for candidate in candidates: answer = parse_answer(candidate) if answer in answers_set: @@ -41,6 +47,7 @@ def get_answers(doc): answers.append(answer) return answers + def parse_answer(answer): # NOTE: Everything is returned as a tuple for uniformity and hashability. if answer["number"] != "": @@ -51,4 +58,147 @@ def parse_answer(answer): " ".join( [answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]] ).strip(), - ) \ No newline at end of file + ) + + +def process_results(doc, results): + + preds, golds = results, doc["answers"] + max_em = 0 + max_f1 = 0 + for gold_answer in golds: + exact_match, f1_score = get_metrics(preds, gold_answer) + if gold_answer[0].strip(): + max_em = max(max_em, exact_match) + max_f1 = max(max_f1, f1_score) + return {"em": max_em, "f1": max_f1} + + +def get_metrics(predicted, gold): + """ + Takes a predicted answer and a gold answer (that are both either a string or a list of + strings), and returns exact match and the DROP F1 metric for the prediction. If you are + writing a script for evaluating objects in memory (say, the output of predictions during + validation, or while training), this is the function you want to call, after using + :func:`answer_json_to_strings` when reading the gold answer from the released data file. 
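+
+    Scoring is bag-of-tokens based: each answer span is normalized (lowercased,
+    articles and punctuation stripped, numbers canonicalized) and turned into a
+    token bag; ``_align_bags`` finds the optimal one-to-one assignment between
+    predicted and gold bags, and the reported F1 is the mean of the per-bag
+    scores under that assignment. Exact match additionally requires the full
+    sets of normalized spans to coincide.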
+ """ + predicted_bags = _answer_to_bags(predicted) + gold_bags = _answer_to_bags(gold) + + if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len( + gold_bags[0] + ): + exact_match = 1.0 + else: + exact_match = 0.0 + + f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) + f1 = np.mean(f1_per_bag) + f1 = round(f1, 2) + return exact_match, f1 + + +def _answer_to_bags(answer): + if isinstance(answer, (list, tuple)): + raw_spans = answer + else: + raw_spans = [answer] + normalized_spans = [] + token_bags = [] + for raw_span in raw_spans: + normalized_span = _normalize(raw_span) + normalized_spans.append(normalized_span) + token_bags.append(set(normalized_span.split())) + return normalized_spans, token_bags + + +def _align_bags(predicted, gold): + """ + Takes gold and predicted answer sets and first finds the optimal 1-1 alignment + between them and gets maximum metric values over all the answers. + """ + scores = np.zeros([len(gold), len(predicted)]) + for gold_index, gold_item in enumerate(gold): + for pred_index, pred_item in enumerate(predicted): + if _match_numbers_if_present(gold_item, pred_item): + scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item) + row_ind, col_ind = linear_sum_assignment(-scores) + + max_scores = np.zeros([max(len(gold), len(predicted))]) + for row, column in zip(row_ind, col_ind): + max_scores[row] = max(max_scores[row], scores[row, column]) + return max_scores + + +def _compute_f1(predicted_bag, gold_bag): + intersection = len(gold_bag.intersection(predicted_bag)) + if not predicted_bag: + precision = 1.0 + else: + precision = intersection / float(len(predicted_bag)) + if not gold_bag: + recall = 1.0 + else: + recall = intersection / float(len(gold_bag)) + f1 = ( + (2 * precision * recall) / (precision + recall) + if not (precision == 0.0 and recall == 0.0) + else 0.0 + ) + return f1 + + +def _match_numbers_if_present(gold_bag, predicted_bag): + gold_numbers = set() + predicted_numbers = set() + for word in gold_bag: + if _is_number(word): + gold_numbers.add(word) + for word in predicted_bag: + if _is_number(word): + predicted_numbers.add(word) + if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): + return True + return False + + +def _is_number(text): + try: + float(text) + return True + except ValueError: + return False + + +def _remove_articles(text): + return _ARTICLES.sub(" ", text) + + +def _white_space_fix(text): + return " ".join(text.split()) + + +def _remove_punc(text): + exclude = set(string.punctuation) + if not _is_number(text): + return "".join(ch for ch in text if ch not in exclude) + else: + return text + + +def _fix_number(text): + return str(float(text)) if _is_number(text) else text + + +def _tokenize(text): + return re.split(" |-", text) + + +def _normalize(answer): + tokens = [ + _white_space_fix(_remove_articles(_fix_number(_remove_punc(token.lower())))) + for token in _tokenize(answer) + ] + tokens = [token for token in tokens if token.strip()] + normalized = " ".join(tokens).strip() + return normalized -- GitLab From f555a583b2617ea88b21c55d0e4cadb8530c62bd Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 11:58:15 +0000 Subject: [PATCH 083/264] fix formatting --- lm_eval/tasks/coqa/utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lm_eval/tasks/coqa/utils.py b/lm_eval/tasks/coqa/utils.py index 9191e779..4fed8ff8 100644 --- a/lm_eval/tasks/coqa/utils.py +++ b/lm_eval/tasks/coqa/utils.py @@ -2,6 +2,7 @@ from itertools 
import zip_longest import transformers.data.metrics.squad_metrics as squad_metrics + def doc_to_text(doc): # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1} # and a question qi, the task is to predict the answer ai @@ -41,14 +42,13 @@ def em(gold_list, pred): for i in range(len(gold_list)): gold_answers = gold_list[0:i] + gold_list[i + 1 :] # predictions compared against (n) golds and take maximum - em_sum += max( - squad_metrics.compute_exact(a, pred) for a in gold_answers - ) + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers) else: em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) return em_sum / max(1, len(gold_list)) + def compute_scores(gold_list, pred): # tests for exact match and on the normalised answer (compute_exact) # test for overlap (compute_f1) @@ -58,9 +58,7 @@ def compute_scores(gold_list, pred): for i in range(len(gold_list)): gold_answers = gold_list[0:i] + gold_list[i + 1 :] # predictions compared against (n) golds and take maximum - em_sum += max( - squad_metrics.compute_exact(a, pred) for a in gold_answers - ) + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers) f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers) else: em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) @@ -71,6 +69,7 @@ def compute_scores(gold_list, pred): "f1": f1_sum / max(1, len(gold_list)), } + def process_results(doc, results): gold_list = doc_to_target(doc) -- GitLab From 8287fe7cf2bb6d4dfae919c7a7ffee9c322df59f Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 11:59:28 +0000 Subject: [PATCH 084/264] checked coqa on readme --- lm_eval/tasks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index eec57638..33935920 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -5,7 +5,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [x] Glue - [x] SuperGlue -- [ ] CoQA (Lintang) +- [x] CoQA - [ ] DROP (Lintang) - [x] ~~Lambada~~ - [x] Lambada (Cloze variants) -- GitLab From 8d1d003db5772224d0348c81260101c963bba236 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Wed, 30 Aug 2023 21:18:00 +0700 Subject: [PATCH 085/264] Update task.py --- lm_eval/api/task.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index b91e954d..09a27954 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1017,16 +1017,16 @@ class ConfigurableTask(Task): gold_index_error = False if type(gold) is list: - gold = [i if i < len(choices) else -1000 for i in gold] - if -1000 in gold: + gold = [i if i < len(choices) else -100 for i in gold] + if -100 in gold: gold_index_error = True else: if type(gold) is int: - gold = gold if gold < len(choices) else -1000 + gold = gold if gold < len(choices) else -100 elif type(gold) is str: - gold = choices.index(gold) if gold in choices else -1000 + gold = choices.index(gold) if gold in choices else -100 - if gold == -1000: + if gold == -100: gold_index_error = True if gold_index_error: @@ -1039,13 +1039,13 @@ class ConfigurableTask(Task): acc = 1.0 if pred in gold else 0.0 acc_norm = 1.0 if pred_norm in gold else 0.0 exact_match = int( - any([is_greedy[i] if i != -1000 else 0 for i in gold]) + any([is_greedy[i] if i != -100 else 0 for i in gold]) ) else: acc = 1.0 if pred == gold else 0.0 acc_norm = 1.0 if pred_norm == gold else 0.0 # 
TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly - exact_match = int(is_greedy[gold]) if gold != -1000 else 0 + exact_match = int(is_greedy[gold]) if gold != -100 else 0 result_dict = { **({"acc": acc} if "acc" in use_metric else {}), -- GitLab From ebfff08f313a78c7500e324b43c791f5c1d44d74 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Wed, 30 Aug 2023 21:21:15 +0700 Subject: [PATCH 086/264] Update task.py --- lm_eval/api/task.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 09a27954..eefc14a1 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1038,9 +1038,7 @@ class ConfigurableTask(Task): if self.multiple_target: acc = 1.0 if pred in gold else 0.0 acc_norm = 1.0 if pred_norm in gold else 0.0 - exact_match = int( - any([is_greedy[i] if i != -100 else 0 for i in gold]) - ) + exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold])) else: acc = 1.0 if pred == gold else 0.0 acc_norm = 1.0 if pred_norm == gold else 0.0 -- GitLab From 618c5936dd3a8661bbf527f7043a1315bd3c090f Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 14:42:00 +0000 Subject: [PATCH 087/264] make visible available tasks --- main.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/main.py b/main.py index 1c2e6f6f..dadc5b69 100644 --- a/main.py +++ b/main.py @@ -16,16 +16,18 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false" def parse_args(): - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("--model", required=True, help="Name of model e.g. `hf`") + parser.add_argument( + "--tasks", + default=None, + help="Available Tasks:\n - {}".format("\n - ".join(sorted(ALL_TASKS))), + ) parser.add_argument( "--model_args", default="", help="String arguments for model, e.g. 
`pretrained=EleutherAI/pythia-160m,dtype=float32`", ) - parser.add_argument( - "--tasks", default=None, choices=utils.MultiChoice(sorted(ALL_TASKS)) - ) parser.add_argument( "--num_fewshot", type=int, -- GitLab From c605a71009239288265d579f46a80146363f26dc Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 14:52:06 +0000 Subject: [PATCH 088/264] add logging information --- lm_eval/logger.py | 2 ++ main.py | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lm_eval/logger.py b/lm_eval/logger.py index 9cb59e05..f87fbee3 100644 --- a/lm_eval/logger.py +++ b/lm_eval/logger.py @@ -6,3 +6,5 @@ logging.basicConfig( level=logging.INFO, ) eval_logger = logging.getLogger("lm-eval") + +SPACING = " "*47 \ No newline at end of file diff --git a/main.py b/main.py index dadc5b69..6b28e2ae 100644 --- a/main.py +++ b/main.py @@ -9,7 +9,7 @@ from pathlib import Path from lm_eval import evaluator, utils from lm_eval.api.registry import ALL_TASKS -from lm_eval.logger import eval_logger +from lm_eval.logger import eval_logger, SPACING from lm_eval.tasks import include_task_folder os.environ["TOKENIZERS_PARALLELISM"] = "false" @@ -131,6 +131,12 @@ def main(): if os.path.isfile(task): config = utils.load_yaml_config(task) task_names.append(config) + + if task_names == []: + eval_logger.error( + "No tasks were detected,\n" + f"{SPACING}Try `lm-eval -h` for list of available tasks", + ) if args.output_path: path = Path(args.output_path) -- GitLab From ee59182d7fb59103f13a55500b3e1d439504e796 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 14:53:15 +0000 Subject: [PATCH 089/264] formatting --- lm_eval/logger.py | 2 +- main.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lm_eval/logger.py b/lm_eval/logger.py index f87fbee3..129c112e 100644 --- a/lm_eval/logger.py +++ b/lm_eval/logger.py @@ -7,4 +7,4 @@ logging.basicConfig( ) eval_logger = logging.getLogger("lm-eval") -SPACING = " "*47 \ No newline at end of file +SPACING = " " * 47 diff --git a/main.py b/main.py index 6b28e2ae..d54bd4f4 100644 --- a/main.py +++ b/main.py @@ -131,12 +131,12 @@ def main(): if os.path.isfile(task): config = utils.load_yaml_config(task) task_names.append(config) - + if task_names == []: eval_logger.error( "No tasks were detected,\n" f"{SPACING}Try `lm-eval -h` for list of available tasks", - ) + ) if args.output_path: path = Path(args.output_path) -- GitLab From 24754ee41de38d24b2a86228c6e10db03a9fbf48 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 16:35:39 +0000 Subject: [PATCH 090/264] add bbh --- lm_eval/tasks/bbh/README.md | 45 +++++++++++++++++++ lm_eval/tasks/bbh/_generate_configs.py | 29 ++++++++++++ lm_eval/tasks/bbh/_template_yaml | 17 +++++++ lm_eval/tasks/bbh/boolean_expressions.yaml | 4 ++ lm_eval/tasks/bbh/causal_judgement.yaml | 4 ++ lm_eval/tasks/bbh/date_understanding.yaml | 4 ++ lm_eval/tasks/bbh/disambiguation_qa.yaml | 4 ++ lm_eval/tasks/bbh/dyck_languages.yaml | 4 ++ lm_eval/tasks/bbh/formal_fallacies.yaml | 4 ++ lm_eval/tasks/bbh/geometric_shapes.yaml | 4 ++ lm_eval/tasks/bbh/hyperbaton.yaml | 4 ++ .../bbh/logical_deduction_five_objects.yaml | 4 ++ .../bbh/logical_deduction_seven_objects.yaml | 4 ++ .../bbh/logical_deduction_three_objects.yaml | 4 ++ lm_eval/tasks/bbh/movie_recommendation.yaml | 4 ++ .../tasks/bbh/multistep_arithmetic_two.yaml | 4 ++ lm_eval/tasks/bbh/navigate.yaml | 4 ++ lm_eval/tasks/bbh/object_counting.yaml | 4 ++ lm_eval/tasks/bbh/penguins_in_a_table.yaml | 4 ++ 
.../bbh/reasoning_about_colored_objects.yaml | 4 ++ lm_eval/tasks/bbh/ruin_names.yaml | 4 ++ .../salient_translation_error_detection.yaml | 4 ++ lm_eval/tasks/bbh/snarks.yaml | 4 ++ lm_eval/tasks/bbh/sports_understanding.yaml | 4 ++ lm_eval/tasks/bbh/temporal_sequences.yaml | 4 ++ ...racking_shuffled_objects_five_objects.yaml | 4 ++ ...acking_shuffled_objects_seven_objects.yaml | 4 ++ ...acking_shuffled_objects_three_objects.yaml | 4 ++ lm_eval/tasks/bbh/web_of_lies.yaml | 4 ++ lm_eval/tasks/bbh/word_sorting.yaml | 4 ++ 30 files changed, 199 insertions(+) create mode 100644 lm_eval/tasks/bbh/README.md create mode 100644 lm_eval/tasks/bbh/_generate_configs.py create mode 100644 lm_eval/tasks/bbh/_template_yaml create mode 100644 lm_eval/tasks/bbh/boolean_expressions.yaml create mode 100644 lm_eval/tasks/bbh/causal_judgement.yaml create mode 100644 lm_eval/tasks/bbh/date_understanding.yaml create mode 100644 lm_eval/tasks/bbh/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bbh/dyck_languages.yaml create mode 100644 lm_eval/tasks/bbh/formal_fallacies.yaml create mode 100644 lm_eval/tasks/bbh/geometric_shapes.yaml create mode 100644 lm_eval/tasks/bbh/hyperbaton.yaml create mode 100644 lm_eval/tasks/bbh/logical_deduction_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/logical_deduction_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/movie_recommendation.yaml create mode 100644 lm_eval/tasks/bbh/multistep_arithmetic_two.yaml create mode 100644 lm_eval/tasks/bbh/navigate.yaml create mode 100644 lm_eval/tasks/bbh/object_counting.yaml create mode 100644 lm_eval/tasks/bbh/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bbh/ruin_names.yaml create mode 100644 lm_eval/tasks/bbh/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bbh/snarks.yaml create mode 100644 lm_eval/tasks/bbh/sports_understanding.yaml create mode 100644 lm_eval/tasks/bbh/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/web_of_lies.yaml create mode 100644 lm_eval/tasks/bbh/word_sorting.yaml diff --git a/lm_eval/tasks/bbh/README.md b/lm_eval/tasks/bbh/README.md new file mode 100644 index 00000000..91be60fc --- /dev/null +++ b/lm_eval/tasks/bbh/README.md @@ -0,0 +1,45 @@ +# BigBenchHard + +## Paper +Title: `Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them` +Abstract: https://arxiv.org/abs/2210.09261 + +A suite of 23 challenging BIG-Bench tasks which we call BIG-Bench Hard (BBH). +These are the task for which prior language model evaluations did not outperform +the average human-rater. + +Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard + + +## Citation +``` +@article{suzgun2022challenging, + title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them}, + author={Suzgun, Mirac and Scales, Nathan and Sch{\"a}rli, Nathanael and Gehrmann, Sebastian and Tay, Yi and Chung, Hyung Won and Chowdhery, Aakanksha and Le, Quoc V and Chi, Ed H and Zhou, Denny and and Wei, Jason}, + journal={arXiv preprint arXiv:2210.09261}, + year={2022} +} +``` + +### Groups and Tasks + +#### Groups + +- `bbh` + +#### Tasks + +- ... 
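+
+Each subtask yaml is generated from `_template_yaml` by `_generate_configs.py`
+and registers under the group `bbh`, so the whole suite should be runnable in
+one go, e.g. `python main.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks bbh`
+(the model arguments here are illustrative).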
+ +### Checklist + +- [x] Is in Eval-harness v1.0 ? +- [ ] Has been checked for regression from v1.0? +- [ ] Has been checked for equivalence with original paper methodology? +- [ ] "Main" checked variant clearly denoted? + +### Variant Wishlist + +- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation) +- [ ] Using Verifiers +- [ ] Majority voting "without CoT" diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py new file mode 100644 index 00000000..80600809 --- /dev/null +++ b/lm_eval/tasks/bbh/_generate_configs.py @@ -0,0 +1,29 @@ +import yaml +import inspect +import datasets + +from tqdm import tqdm + + +def main() -> None: + + dataset_path = "lukaemon/bbh" + for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()): + file_name = f"{task}.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": "_template_yaml", + "task": f"{dataset_path.split('/')[-1]}_{task}", + "dataset_name": task, + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/lm_eval/tasks/bbh/_template_yaml b/lm_eval/tasks/bbh/_template_yaml new file mode 100644 index 00000000..af6b74b3 --- /dev/null +++ b/lm_eval/tasks/bbh/_template_yaml @@ -0,0 +1,17 @@ +group: bbh +dataset_path: lukaemon/bbh +output_type: greedy_until +test_split: test +doc_to_text: "{{input}}" +doc_to_target: "{{target}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false +generation_kwargs: + until: + - "\n\n" + do_sample: false + temperature: 0.0 diff --git a/lm_eval/tasks/bbh/boolean_expressions.yaml b/lm_eval/tasks/bbh/boolean_expressions.yaml new file mode 100644 index 00000000..d9895c81 --- /dev/null +++ b/lm_eval/tasks/bbh/boolean_expressions.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: boolean_expressions +include: _template_yaml +task: bbh_boolean_expressions diff --git a/lm_eval/tasks/bbh/causal_judgement.yaml b/lm_eval/tasks/bbh/causal_judgement.yaml new file mode 100644 index 00000000..c3d48d53 --- /dev/null +++ b/lm_eval/tasks/bbh/causal_judgement.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: causal_judgement +include: _template_yaml +task: bbh_causal_judgement diff --git a/lm_eval/tasks/bbh/date_understanding.yaml b/lm_eval/tasks/bbh/date_understanding.yaml new file mode 100644 index 00000000..5f60efbe --- /dev/null +++ b/lm_eval/tasks/bbh/date_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: date_understanding +include: _template_yaml +task: bbh_date_understanding diff --git a/lm_eval/tasks/bbh/disambiguation_qa.yaml b/lm_eval/tasks/bbh/disambiguation_qa.yaml new file mode 100644 index 00000000..b043460e --- /dev/null +++ b/lm_eval/tasks/bbh/disambiguation_qa.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: disambiguation_qa +include: _template_yaml +task: bbh_disambiguation_qa diff --git a/lm_eval/tasks/bbh/dyck_languages.yaml b/lm_eval/tasks/bbh/dyck_languages.yaml new file mode 100644 index 00000000..6b6648d0 --- /dev/null +++ b/lm_eval/tasks/bbh/dyck_languages.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: dyck_languages +include: _template_yaml +task: bbh_dyck_languages diff --git a/lm_eval/tasks/bbh/formal_fallacies.yaml 
b/lm_eval/tasks/bbh/formal_fallacies.yaml new file mode 100644 index 00000000..18d30c91 --- /dev/null +++ b/lm_eval/tasks/bbh/formal_fallacies.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: formal_fallacies +include: _template_yaml +task: bbh_formal_fallacies diff --git a/lm_eval/tasks/bbh/geometric_shapes.yaml b/lm_eval/tasks/bbh/geometric_shapes.yaml new file mode 100644 index 00000000..9616b6d6 --- /dev/null +++ b/lm_eval/tasks/bbh/geometric_shapes.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: geometric_shapes +include: _template_yaml +task: bbh_geometric_shapes diff --git a/lm_eval/tasks/bbh/hyperbaton.yaml b/lm_eval/tasks/bbh/hyperbaton.yaml new file mode 100644 index 00000000..d1ff5bf8 --- /dev/null +++ b/lm_eval/tasks/bbh/hyperbaton.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: hyperbaton +include: _template_yaml +task: bbh_hyperbaton diff --git a/lm_eval/tasks/bbh/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/logical_deduction_five_objects.yaml new file mode 100644 index 00000000..91e6ec74 --- /dev/null +++ b/lm_eval/tasks/bbh/logical_deduction_five_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: logical_deduction_five_objects +include: _template_yaml +task: bbh_logical_deduction_five_objects diff --git a/lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml new file mode 100644 index 00000000..342cf563 --- /dev/null +++ b/lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: logical_deduction_seven_objects +include: _template_yaml +task: bbh_logical_deduction_seven_objects diff --git a/lm_eval/tasks/bbh/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/logical_deduction_three_objects.yaml new file mode 100644 index 00000000..6669c6c8 --- /dev/null +++ b/lm_eval/tasks/bbh/logical_deduction_three_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: logical_deduction_three_objects +include: _template_yaml +task: bbh_logical_deduction_three_objects diff --git a/lm_eval/tasks/bbh/movie_recommendation.yaml b/lm_eval/tasks/bbh/movie_recommendation.yaml new file mode 100644 index 00000000..af884eec --- /dev/null +++ b/lm_eval/tasks/bbh/movie_recommendation.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: movie_recommendation +include: _template_yaml +task: bbh_movie_recommendation diff --git a/lm_eval/tasks/bbh/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/multistep_arithmetic_two.yaml new file mode 100644 index 00000000..2ab191b8 --- /dev/null +++ b/lm_eval/tasks/bbh/multistep_arithmetic_two.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: multistep_arithmetic_two +include: _template_yaml +task: bbh_multistep_arithmetic_two diff --git a/lm_eval/tasks/bbh/navigate.yaml b/lm_eval/tasks/bbh/navigate.yaml new file mode 100644 index 00000000..f737a9c5 --- /dev/null +++ b/lm_eval/tasks/bbh/navigate.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: navigate +include: _template_yaml +task: bbh_navigate diff --git a/lm_eval/tasks/bbh/object_counting.yaml b/lm_eval/tasks/bbh/object_counting.yaml new file mode 100644 index 00000000..606bd92d --- /dev/null +++ b/lm_eval/tasks/bbh/object_counting.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: object_counting +include: _template_yaml +task: bbh_object_counting diff --git 
a/lm_eval/tasks/bbh/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/penguins_in_a_table.yaml new file mode 100644 index 00000000..25e183ce --- /dev/null +++ b/lm_eval/tasks/bbh/penguins_in_a_table.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: penguins_in_a_table +include: _template_yaml +task: bbh_penguins_in_a_table diff --git a/lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml new file mode 100644 index 00000000..785e0b2e --- /dev/null +++ b/lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: reasoning_about_colored_objects +include: _template_yaml +task: bbh_reasoning_about_colored_objects diff --git a/lm_eval/tasks/bbh/ruin_names.yaml b/lm_eval/tasks/bbh/ruin_names.yaml new file mode 100644 index 00000000..aef28b1c --- /dev/null +++ b/lm_eval/tasks/bbh/ruin_names.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: ruin_names +include: _template_yaml +task: bbh_ruin_names diff --git a/lm_eval/tasks/bbh/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/salient_translation_error_detection.yaml new file mode 100644 index 00000000..433867fe --- /dev/null +++ b/lm_eval/tasks/bbh/salient_translation_error_detection.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: salient_translation_error_detection +include: _template_yaml +task: bbh_salient_translation_error_detection diff --git a/lm_eval/tasks/bbh/snarks.yaml b/lm_eval/tasks/bbh/snarks.yaml new file mode 100644 index 00000000..49f57d20 --- /dev/null +++ b/lm_eval/tasks/bbh/snarks.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: snarks +include: _template_yaml +task: bbh_snarks diff --git a/lm_eval/tasks/bbh/sports_understanding.yaml b/lm_eval/tasks/bbh/sports_understanding.yaml new file mode 100644 index 00000000..cf84b1e1 --- /dev/null +++ b/lm_eval/tasks/bbh/sports_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: sports_understanding +include: _template_yaml +task: bbh_sports_understanding diff --git a/lm_eval/tasks/bbh/temporal_sequences.yaml b/lm_eval/tasks/bbh/temporal_sequences.yaml new file mode 100644 index 00000000..b3f5c0af --- /dev/null +++ b/lm_eval/tasks/bbh/temporal_sequences.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: temporal_sequences +include: _template_yaml +task: bbh_temporal_sequences diff --git a/lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml new file mode 100644 index 00000000..d4ca2fe0 --- /dev/null +++ b/lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: tracking_shuffled_objects_five_objects +include: _template_yaml +task: bbh_tracking_shuffled_objects_five_objects diff --git a/lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml new file mode 100644 index 00000000..20fff820 --- /dev/null +++ b/lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: tracking_shuffled_objects_seven_objects +include: _template_yaml +task: bbh_tracking_shuffled_objects_seven_objects diff --git a/lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml new file mode 
100644 index 00000000..f219d30c --- /dev/null +++ b/lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: tracking_shuffled_objects_three_objects +include: _template_yaml +task: bbh_tracking_shuffled_objects_three_objects diff --git a/lm_eval/tasks/bbh/web_of_lies.yaml b/lm_eval/tasks/bbh/web_of_lies.yaml new file mode 100644 index 00000000..18dcb591 --- /dev/null +++ b/lm_eval/tasks/bbh/web_of_lies.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: web_of_lies +include: _template_yaml +task: bbh_web_of_lies diff --git a/lm_eval/tasks/bbh/word_sorting.yaml b/lm_eval/tasks/bbh/word_sorting.yaml new file mode 100644 index 00000000..11725e0a --- /dev/null +++ b/lm_eval/tasks/bbh/word_sorting.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: word_sorting +include: _template_yaml +task: bbh_word_sorting -- GitLab From 525f1d1593b754cca4c9282e4f484432708db035 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 30 Aug 2023 16:36:05 +0000 Subject: [PATCH 091/264] added more flan subtasks --- lm_eval/benchmarks/flan/flan_cot_yaml | 37 ------------------- .../flan/prompt_templates/flan_bbh.yaml | 29 +++++++++++++++ lm_eval/benchmarks/flan_cot.yaml | 11 ++++++ lm_eval/benchmarks/flan_held_in.yaml | 12 +++--- lm_eval/benchmarks/flan_held_out.yaml | 4 ++ 5 files changed, 50 insertions(+), 43 deletions(-) delete mode 100644 lm_eval/benchmarks/flan/flan_cot_yaml create mode 100644 lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml create mode 100644 lm_eval/benchmarks/flan_cot.yaml create mode 100644 lm_eval/benchmarks/flan_held_out.yaml diff --git a/lm_eval/benchmarks/flan/flan_cot_yaml b/lm_eval/benchmarks/flan/flan_cot_yaml deleted file mode 100644 index 75ac4a10..00000000 --- a/lm_eval/benchmarks/flan/flan_cot_yaml +++ /dev/null @@ -1,37 +0,0 @@ -group: flan_cot -task: - - include: cot_template_yaml - dataset_path: super_glue - dataset_name: boolq - use_prompt: promptsource:* - validation_split: validation - - include: cot_template_yaml - dataset_path: super_glue - dataset_name: rte - use_prompt: promptsource:* - validation_split: validation - - include: cot_template_yaml - task: anli_r1 - dataset_path: anli - use_prompt: promptsource:* - validation_split: dev_r1 - - include: cot_template_yaml - task: anli_r2 - dataset_path: anli - use_prompt: promptsource:* - validation_split: dev_r2 - - include: cot_template_yaml - task: anli_r3 - dataset_path: anli - use_prompt: promptsource:* - validation_split: dev_r3 - - include: cot_template_yaml - task: ai2_arc - dataset_path: ARC-Easy - use_prompt: promptsource:* - validation_split: validation - - include: cot_template_yaml - task: ai2_arc - dataset_path: ARC-Challange - use_prompt: promptsource:* - validation_split: validation diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml new file mode 100644 index 00000000..525e9e0c --- /dev/null +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml @@ -0,0 +1,29 @@ +# Flan Prompt Templates +prompts: + "template-0": + doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-1": + doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is 
true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-2": + doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-3": + doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-4": + doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-5": + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-6": + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-7": + doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + "template-8": + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" diff --git a/lm_eval/benchmarks/flan_cot.yaml b/lm_eval/benchmarks/flan_cot.yaml new file mode 100644 index 00000000..ff6edc24 --- /dev/null +++ b/lm_eval/benchmarks/flan_cot.yaml @@ -0,0 +1,11 @@ +group: flan_cot +task: + - include: flan/yaml_templates/cot_template_yaml + dataset_path: gsmk + dataset_name: boolq + use_prompt: promptsource:* + validation_split: validation + - include: flan/yaml_templates/cot_template_yaml + dataset_path: EleutherAI/asdiv + use_prompt: promptsource:* + validation_split: validation diff --git a/lm_eval/benchmarks/flan_held_in.yaml b/lm_eval/benchmarks/flan_held_in.yaml index 91979f9f..a560bda8 100644 --- a/lm_eval/benchmarks/flan_held_in.yaml +++ b/lm_eval/benchmarks/flan_held_in.yaml @@ -5,32 +5,32 @@ task: dataset_name: boolq use_prompt: flan/prompt_templates/flan_boolq.yaml:* validation_split: validation - - include: flan/yaml_templates//held_in_template_yaml + - include: flan/yaml_templates/held_in_template_yaml dataset_path: super_glue dataset_name: rte use_prompt: flan/prompt_templates/flan_rte.yaml:* validation_split: validation - - include: flan/yaml_templates//held_in_template_yaml + - include: flan/yaml_templates/held_in_template_yaml task: anli_r1 dataset_path: anli use_prompt: flan/prompt_templates/flan_anli.yaml:* validation_split: dev_r1 - - include: flan/yaml_templates//held_in_template_yaml + - include: flan/yaml_templates/held_in_template_yaml task: anli_r2 dataset_path: anli use_prompt: flan/prompt_templates/flan_anli.yaml:* validation_split: dev_r2 - - include: flan/yaml_templates//held_in_template_yaml + - include: 
flan/yaml_templates/held_in_template_yaml task: anli_r3 dataset_path: anli use_prompt: flan/prompt_templates/flan_anli.yaml:* validation_split: dev_r3 - # - include: flan/yaml_templates//held_in_template_yaml + # - include: flan/yaml_templates/held_in_template_yaml # task: ai2_arc # dataset_path: ARC-Easy # use_prompt: local:* # validation_split: validation - # - include: flan/yaml_templates//held_in_template_yaml + # - include: flan/yaml_templates/held_in_template_yaml # task: ai2_arc # dataset_path: ARC-Challange # use_prompt: local:* diff --git a/lm_eval/benchmarks/flan_held_out.yaml b/lm_eval/benchmarks/flan_held_out.yaml new file mode 100644 index 00000000..4cd56468 --- /dev/null +++ b/lm_eval/benchmarks/flan_held_out.yaml @@ -0,0 +1,4 @@ +group: flan_held_out +task: + - bbh + - mmlu -- GitLab From 788b6f9432240f5d5c260a6712b4b720ae6a23dc Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Wed, 30 Aug 2023 18:11:36 +0100 Subject: [PATCH 092/264] moved API inside function; API expects json --- lm_eval/tasks/realtoxicityprompts/metric.py | 25 ++++++++++----------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/lm_eval/tasks/realtoxicityprompts/metric.py b/lm_eval/tasks/realtoxicityprompts/metric.py index 255f55f9..42f678ea 100644 --- a/lm_eval/tasks/realtoxicityprompts/metric.py +++ b/lm_eval/tasks/realtoxicityprompts/metric.py @@ -1,21 +1,10 @@ import os -import sys import json import requests import numpy as np from lm_eval.logger import eval_logger -try: - API_KEY = os.environ["PERSPECTIVE_API_KEY"] -except KeyError: - API_KEY = None - eval_logger.warning( - "PERSPECTIVE_API_KEY is not set. If you are running the `realtoxicityprompts` task, please set this environment variable." - ) - -url = f"https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key={API_KEY}" - def toxicity_perspective_api(references, predictions, **kwargs): """Toxicity Perspective API is a metric which uses Perspective API to score the toxicity of a generated sentence. @@ -25,6 +14,16 @@ def toxicity_perspective_api(references, predictions, **kwargs): Lower is better """ + try: + API_KEY = os.environ["PERSPECTIVE_API_KEY"] + except KeyError: + eval_logger.warning( + "PERSPECTIVE_API_KEY is not set. If you are running the `realtoxicityprompts` task, please set this environment variable." 
+ ) + raise + + url = f"https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key={API_KEY}" + scores = [] for pred in predictions: data = { @@ -35,7 +34,7 @@ def toxicity_perspective_api(references, predictions, **kwargs): headers = { "content-type": "application/json", } - req_response = requests.post(url, data=data, headers=headers) + req_response = requests.post(url, json=data, headers=headers) if req_response.ok: response = json.loads(req_response.text) if ( @@ -54,6 +53,6 @@ def toxicity_perspective_api(references, predictions, **kwargs): raise SystemExit(0) else: eval_logger.error("Unhandled Exception") - raise SystemExit(0) + req_response.raise_for_status() return np.mean(scores) -- GitLab From c3764d2c31bfa0e45c8f57fb200c7b4d2642e8c8 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 31 Aug 2023 11:38:41 +0000 Subject: [PATCH 093/264] update to generate bbh configs --- .../flan/prompt_templates/flan_anli.yaml | 18 +++++++++--------- lm_eval/tasks/bbh/_generate_configs.py | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml index 525e9e0c..7dae0ce0 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml @@ -2,28 +2,28 @@ prompts: "template-0": doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: """{{["Yes", "It's impossible to say", "No"][label]}}""" "template-1": doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-2": doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-3": doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-4": doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-5": doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-6": doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 
'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-7": doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" "template-8": doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py index 80600809..40e4c07d 100644 --- a/lm_eval/tasks/bbh/_generate_configs.py +++ b/lm_eval/tasks/bbh/_generate_configs.py @@ -26,4 +26,4 @@ def main() -> None: if __name__ == "__main__": - main() \ No newline at end of file + main() -- GitLab From 6f42ab7c111946892de4d0402aa4f83895f3edfd Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 31 Aug 2023 12:01:07 +0000 Subject: [PATCH 094/264] detect if a task was not found --- main.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/main.py b/main.py index d54bd4f4..43ee6cc6 100644 --- a/main.py +++ b/main.py @@ -127,15 +127,18 @@ def main(): else: tasks_list = args.tasks.split(",") task_names = utils.pattern_match(tasks_list, ALL_TASKS) + task_missing = [] for task in [task for task in tasks_list if task not in task_names]: if os.path.isfile(task): config = utils.load_yaml_config(task) task_names.append(config) + else: + task_missing.append(task) - if task_names == []: + if task_missing != []: eval_logger.error( - "No tasks were detected,\n" - f"{SPACING}Try `lm-eval -h` for list of available tasks", + "Tasks were not found: {}\n" + "{}Try `lm-eval -h` for list of available tasks".format(", ".join(task_missing), SPACING) ) if args.output_path: -- GitLab From 2e4b3c99c22d8ac1017db3fb872a839d2b853e2c Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 31 Aug 2023 13:29:21 +0000 Subject: [PATCH 095/264] modifed error message --- main.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/main.py b/main.py index 43ee6cc6..4fb3a402 100644 --- a/main.py +++ b/main.py @@ -136,10 +136,13 @@ def main(): task_missing.append(task) if task_missing != []: + missing = ", ".join(task_missing), eval_logger.error( - "Tasks were not found: {}\n" - "{}Try `lm-eval -h` for list of available tasks".format(", ".join(task_missing), SPACING) + f"Tasks were not found: {missing}\n" + f"{SPACING}Try `lm-eval -h` for list of available tasks", ) + raise ValueError(f"Tasks {missing} were not found.") + if args.output_path: path = Path(args.output_path) -- GitLab From 3c842795640f32cb6e01b58103d0e108f6a43d59 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 31 Aug 2023 13:35:36 +0000 Subject: [PATCH 096/264] modifed error message --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 4fb3a402..f9efd092 100644 --- a/main.py +++ b/main.py @@ -136,7 +136,7 @@ def main(): task_missing.append(task) if task_missing != []: - missing = ", ".join(task_missing), + missing = ", ".join(task_missing) eval_logger.error( f"Tasks were not found: {missing}\n" f"{SPACING}Try `lm-eval -h` for list of available tasks", -- GitLab From 
d01cc479194551d0e4eb10c05a8f38eb44dbb550 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 31 Aug 2023 13:36:19 +0000 Subject: [PATCH 097/264] modifed error message --- main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/main.py b/main.py index f9efd092..b254d7d8 100644 --- a/main.py +++ b/main.py @@ -143,7 +143,6 @@ def main(): ) raise ValueError(f"Tasks {missing} were not found.") - if args.output_path: path = Path(args.output_path) # check if file or 'dir/results.json' exists -- GitLab From a81ef1a798691b2e02129c7496f4c5883c951611 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 1 Sep 2023 12:30:40 +0000 Subject: [PATCH 098/264] edit stop token --- lm_eval/tasks/super_glue/boolq/t5-prompt.yaml | 3 +++ lm_eval/tasks/super_glue/cb/t5-prompt.yaml | 3 +++ lm_eval/tasks/super_glue/cb/t5_utils.py | 2 +- lm_eval/tasks/super_glue/copa/t5-prompt.yaml | 3 +++ lm_eval/tasks/super_glue/multirc/t5-prompt.yaml | 2 -- lm_eval/tasks/super_glue/record/t5-prompt.yaml | 3 +++ lm_eval/tasks/super_glue/rte/t5-prompt.yaml | 3 +++ lm_eval/tasks/super_glue/wic/t5-prompt.yaml | 3 +++ lm_eval/tasks/super_glue/wsc/t5-prompt.yaml | 3 +++ 9 files changed, 22 insertions(+), 3 deletions(-) diff --git a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml index bda3e614..8ebd82fb 100644 --- a/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -9,6 +9,9 @@ output_type: greedy_until doc_to_text: "boolq passage: {{passage}} question: {{question}}" doc_to_target: label doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/super_glue/cb/t5-prompt.yaml b/lm_eval/tasks/super_glue/cb/t5-prompt.yaml index 61b0d8a4..a16505fa 100644 --- a/lm_eval/tasks/super_glue/cb/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/cb/t5-prompt.yaml @@ -9,6 +9,9 @@ output_type: greedy_until doc_to_text: "cb hypothesis: {{hypothesis}} premise: {{premise}}" doc_to_target: label doc_to_choice: ['entailment', 'contradiction', 'neutral'] +generation_kwargs: + until: + - "" metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm_eval/tasks/super_glue/cb/t5_utils.py index caf84390..644c2111 100644 --- a/lm_eval/tasks/super_glue/cb/t5_utils.py +++ b/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -4,7 +4,7 @@ import sklearn.metrics def mean_3class_f1(predictions, references): # This is a passthrough function string_label = ["entailment", "contradiction", "neutral"] - predictions = string_label.index(predictions[0]) + predictions = string_label.index(predictions[0]) if predictions[0] in string_label else 0 references = string_label.index(references[0]) return (predictions, references) diff --git a/lm_eval/tasks/super_glue/copa/t5-prompt.yaml b/lm_eval/tasks/super_glue/copa/t5-prompt.yaml index e3f6f04a..47aaf275 100644 --- a/lm_eval/tasks/super_glue/copa/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/copa/t5-prompt.yaml @@ -9,6 +9,9 @@ output_type: greedy_until doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} premise: {{premise}} question: {{question}}" doc_to_target: label doc_to_choice: ['choice1', 'choice2'] +generation_kwargs: + until: + - "" metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml index d0a62652..008c1443 100644 --- a/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml 
+++ b/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml @@ -12,8 +12,6 @@ doc_to_choice: "{% set group_id = idx.question|string %}{{[group_id+'_False', gr generation_kwargs: until: - "" - do_sample: false - temperature: 0.5 metric_list: - metric: !function t5_utils.f1 aggregation: !function t5_utils.agg_f1 diff --git a/lm_eval/tasks/super_glue/record/t5-prompt.yaml b/lm_eval/tasks/super_glue/record/t5-prompt.yaml index 82dcc383..c1db59ad 100644 --- a/lm_eval/tasks/super_glue/record/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/record/t5-prompt.yaml @@ -8,6 +8,9 @@ output_type: greedy_until process_docs: !function t5_utils.process_docs doc_to_text: !function t5_utils.doc_to_text doc_to_target: "{{idx.passage|string}}+{{idx.query}}_{{answers}}" +generation_kwargs: + until: + - "" metric_list: - metric: !function t5_utils.em aggregation: !function t5_utils.squad_em_agg diff --git a/lm_eval/tasks/super_glue/rte/t5-prompt.yaml b/lm_eval/tasks/super_glue/rte/t5-prompt.yaml index 2725b70e..870dc363 100644 --- a/lm_eval/tasks/super_glue/rte/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/rte/t5-prompt.yaml @@ -9,6 +9,9 @@ output_type: greedy_until doc_to_text: "rte hypothesis: {{hypothesis}} premise: {{premise}}" doc_to_target: label doc_to_choice: ['entailment', 'not_entailment'] +generation_kwargs: + until: + - "" metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/super_glue/wic/t5-prompt.yaml b/lm_eval/tasks/super_glue/wic/t5-prompt.yaml index a48cfaee..da6a9411 100644 --- a/lm_eval/tasks/super_glue/wic/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wic/t5-prompt.yaml @@ -9,6 +9,9 @@ output_type: greedy_until doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}" doc_to_target: label doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml index 38c73214..e0ef7538 100644 --- a/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +++ b/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -8,6 +8,9 @@ validation_split: validation output_type: greedy_until doc_to_text: !function "t5_utils.doc_to_text" doc_to_target: label +generation_kwargs: + until: + - "" metric_list: - metric: accuracy aggregation: mean -- GitLab From f23ae748125513cb6d23254b14c33fee23a567d6 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Sun, 3 Sep 2023 12:12:13 +0000 Subject: [PATCH 099/264] add mmlu variants --- lm_eval/tasks/mmlu/_cot_prompts.json | 1 + ...gen_all_splits.py => _generate_configs.py} | 24 ++- .../hendrycks_test_original_default.yaml | 0 .../_mmlu_flan_cot_fewshot_template_yaml | 25 +++ .../mmlu_abstract_algebra.yaml | 40 ++++ .../mmlu/flan_cot_fewshot/mmlu_anatomy.yaml | 57 ++++++ .../mmlu/flan_cot_fewshot/mmlu_astronomy.yaml | 54 +++++ .../mmlu_business_ethics.yaml | 55 +++++ .../mmlu_clinical_knowledge.yaml | 58 ++++++ .../mmlu_college_biology.yaml | 60 ++++++ .../mmlu_college_chemistry.yaml | 37 ++++ .../mmlu_college_computer_science.yaml | 189 ++++++++++++++++++ .../mmlu_college_mathematics.yaml | 49 +++++ .../mmlu_college_medicine.yaml | 52 +++++ .../mmlu_college_physics.yaml | 70 +++++++ .../mmlu_computer_security.yaml | 35 ++++ .../mmlu_conceptual_physics.yaml | 32 +++ .../flan_cot_fewshot/mmlu_econometrics.yaml | 63 ++++++ .../mmlu_electrical_engineering.yaml | 34 ++++ .../mmlu_elementary_mathematics.yaml | 40 ++++ .../flan_cot_fewshot/mmlu_formal_logic.yaml | 57 ++++++ 
.../flan_cot_fewshot/mmlu_global_facts.yaml | 33 +++ .../mmlu_high_school_biology.yaml | 54 +++++ .../mmlu_high_school_chemistry.yaml | 49 +++++ .../mmlu_high_school_computer_science.yaml | 70 +++++++ .../mmlu_high_school_european_history.yaml | 168 ++++++++++++++++ .../mmlu_high_school_geography.yaml | 63 ++++++ ...u_high_school_government_and_politics.yaml | 67 +++++++ .../mmlu_high_school_macroeconomics.yaml | 64 ++++++ .../mmlu_high_school_mathematics.yaml | 36 ++++ .../mmlu_high_school_microeconomics.yaml | 63 ++++++ .../mmlu_high_school_physics.yaml | 38 ++++ .../mmlu_high_school_psychology.yaml | 72 +++++++ .../mmlu_high_school_statistics.yaml | 88 ++++++++ .../mmlu_high_school_us_history.yaml | 133 ++++++++++++ .../mmlu_high_school_world_history.yaml | 82 ++++++++ .../flan_cot_fewshot/mmlu_human_aging.yaml | 48 +++++ .../mmlu_human_sexuality.yaml | 61 ++++++ .../mmlu_international_law.yaml | 80 ++++++++ .../flan_cot_fewshot/mmlu_jurisprudence.yaml | 69 +++++++ .../mmlu_logical_fallacies.yaml | 71 +++++++ .../mmlu_machine_learning.yaml | 59 ++++++ .../flan_cot_fewshot/mmlu_management.yaml | 54 +++++ .../mmlu/flan_cot_fewshot/mmlu_marketing.yaml | 66 ++++++ .../mmlu_medical_genetics.yaml | 61 ++++++ .../flan_cot_fewshot/mmlu_miscellaneous.yaml | 54 +++++ .../flan_cot_fewshot/mmlu_moral_disputes.yaml | 72 +++++++ .../mmlu_moral_scenarios.yaml | 66 ++++++ .../mmlu/flan_cot_fewshot/mmlu_nutrition.yaml | 72 +++++++ .../flan_cot_fewshot/mmlu_philosophy.yaml | 30 +++ .../flan_cot_fewshot/mmlu_prehistory.yaml | 67 +++++++ .../mmlu_professional_accounting.yaml | 47 +++++ .../mmlu_professional_law.yaml | 105 ++++++++++ .../mmlu_professional_medicine.yaml | 69 +++++++ .../mmlu_professional_psychology.yaml | 47 +++++ .../mmlu_public_relations.yaml | 65 ++++++ .../mmlu_security_studies.yaml | 85 ++++++++ .../mmlu/flan_cot_fewshot/mmlu_sociology.yaml | 67 +++++++ .../mmlu_us_foreign_policy.yaml | 66 ++++++ .../mmlu/flan_cot_fewshot/mmlu_virology.yaml | 55 +++++ .../mmlu_world_religions.yaml | 53 +++++ .../_mmlu_flan_generative_template_yaml | 25 +++ .../mmlu_abstract_algebra.yaml | 8 + .../mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml | 7 + .../flan_cot_zeroshot/mmlu_astronomy.yaml | 7 + .../mmlu_business_ethics.yaml | 8 + .../mmlu_clinical_knowledge.yaml | 8 + .../mmlu_college_biology.yaml | 8 + .../mmlu_college_chemistry.yaml | 8 + .../mmlu_college_computer_science.yaml | 8 + .../mmlu_college_mathematics.yaml | 8 + .../mmlu_college_medicine.yaml | 8 + .../mmlu_college_physics.yaml | 8 + .../mmlu_computer_security.yaml | 8 + .../mmlu_conceptual_physics.yaml | 8 + .../flan_cot_zeroshot/mmlu_econometrics.yaml | 7 + .../mmlu_electrical_engineering.yaml | 8 + .../mmlu_elementary_mathematics.yaml | 8 + .../flan_cot_zeroshot/mmlu_formal_logic.yaml | 8 + .../flan_cot_zeroshot/mmlu_global_facts.yaml | 8 + .../mmlu_high_school_biology.yaml | 8 + .../mmlu_high_school_chemistry.yaml | 8 + .../mmlu_high_school_computer_science.yaml | 8 + .../mmlu_high_school_european_history.yaml | 8 + .../mmlu_high_school_geography.yaml | 8 + ...u_high_school_government_and_politics.yaml | 8 + .../mmlu_high_school_macroeconomics.yaml | 8 + .../mmlu_high_school_mathematics.yaml | 8 + .../mmlu_high_school_microeconomics.yaml | 8 + .../mmlu_high_school_physics.yaml | 8 + .../mmlu_high_school_psychology.yaml | 8 + .../mmlu_high_school_statistics.yaml | 8 + .../mmlu_high_school_us_history.yaml | 8 + .../mmlu_high_school_world_history.yaml | 8 + .../flan_cot_zeroshot/mmlu_human_aging.yaml | 8 + .../mmlu_human_sexuality.yaml | 8 + 
.../mmlu_international_law.yaml | 8 + .../flan_cot_zeroshot/mmlu_jurisprudence.yaml | 7 + .../mmlu_logical_fallacies.yaml | 8 + .../mmlu_machine_learning.yaml | 8 + .../flan_cot_zeroshot/mmlu_management.yaml | 7 + .../flan_cot_zeroshot/mmlu_marketing.yaml | 7 + .../mmlu_medical_genetics.yaml | 8 + .../flan_cot_zeroshot/mmlu_miscellaneous.yaml | 7 + .../mmlu_moral_disputes.yaml | 8 + .../mmlu_moral_scenarios.yaml | 8 + .../flan_cot_zeroshot/mmlu_nutrition.yaml | 7 + .../flan_cot_zeroshot/mmlu_philosophy.yaml | 7 + .../flan_cot_zeroshot/mmlu_prehistory.yaml | 7 + .../mmlu_professional_accounting.yaml | 8 + .../mmlu_professional_law.yaml | 8 + .../mmlu_professional_medicine.yaml | 8 + .../mmlu_professional_psychology.yaml | 8 + .../mmlu_public_relations.yaml | 8 + .../mmlu_security_studies.yaml | 8 + .../flan_cot_zeroshot/mmlu_sociology.yaml | 7 + .../mmlu_us_foreign_policy.yaml | 8 + .../mmlu/flan_cot_zeroshot/mmlu_virology.yaml | 7 + .../mmlu_world_religions.yaml | 8 + .../_mmlu_flan_generative_template_yaml | 18 ++ .../_mmlu_flan_loglikelihood_template_yaml | 12 ++ .../flan_n_shot/mmlu_abstract_algebra.yaml | 8 + .../tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml | 7 + .../mmlu/flan_n_shot/mmlu_astronomy.yaml | 7 + .../flan_n_shot/mmlu_business_ethics.yaml | 8 + .../flan_n_shot/mmlu_clinical_knowledge.yaml | 8 + .../flan_n_shot/mmlu_college_biology.yaml | 8 + .../flan_n_shot/mmlu_college_chemistry.yaml | 8 + .../mmlu_college_computer_science.yaml | 8 + .../flan_n_shot/mmlu_college_mathematics.yaml | 8 + .../flan_n_shot/mmlu_college_medicine.yaml | 8 + .../flan_n_shot/mmlu_college_physics.yaml | 8 + .../flan_n_shot/mmlu_computer_security.yaml | 8 + .../flan_n_shot/mmlu_conceptual_physics.yaml | 8 + .../mmlu/flan_n_shot/mmlu_econometrics.yaml | 7 + .../mmlu_electrical_engineering.yaml | 8 + .../mmlu_elementary_mathematics.yaml | 8 + .../mmlu/flan_n_shot/mmlu_formal_logic.yaml | 8 + .../mmlu/flan_n_shot/mmlu_global_facts.yaml | 8 + .../flan_n_shot/mmlu_high_school_biology.yaml | 8 + .../mmlu_high_school_chemistry.yaml | 8 + .../mmlu_high_school_computer_science.yaml | 8 + .../mmlu_high_school_european_history.yaml | 8 + .../mmlu_high_school_geography.yaml | 8 + ...u_high_school_government_and_politics.yaml | 8 + .../mmlu_high_school_macroeconomics.yaml | 8 + .../mmlu_high_school_mathematics.yaml | 8 + .../mmlu_high_school_microeconomics.yaml | 8 + .../flan_n_shot/mmlu_high_school_physics.yaml | 8 + .../mmlu_high_school_psychology.yaml | 8 + .../mmlu_high_school_statistics.yaml | 8 + .../mmlu_high_school_us_history.yaml | 8 + .../mmlu_high_school_world_history.yaml | 8 + .../mmlu/flan_n_shot/mmlu_human_aging.yaml | 8 + .../flan_n_shot/mmlu_human_sexuality.yaml | 8 + .../flan_n_shot/mmlu_international_law.yaml | 8 + .../mmlu/flan_n_shot/mmlu_jurisprudence.yaml | 7 + .../flan_n_shot/mmlu_logical_fallacies.yaml | 8 + .../mmlu_loglikelihood_abstract_algebra.yaml | 8 + .../mmlu_loglikelihood_anatomy.yaml | 7 + .../mmlu_loglikelihood_astronomy.yaml | 7 + .../mmlu_loglikelihood_business_ethics.yaml | 8 + ...mmlu_loglikelihood_clinical_knowledge.yaml | 8 + .../mmlu_loglikelihood_college_biology.yaml | 8 + .../mmlu_loglikelihood_college_chemistry.yaml | 8 + ...oglikelihood_college_computer_science.yaml | 8 + ...mlu_loglikelihood_college_mathematics.yaml | 8 + .../mmlu_loglikelihood_college_medicine.yaml | 8 + .../mmlu_loglikelihood_college_physics.yaml | 8 + .../mmlu_loglikelihood_computer_security.yaml | 8 + ...mmlu_loglikelihood_conceptual_physics.yaml | 8 + .../mmlu_loglikelihood_econometrics.yaml | 7 + 
..._loglikelihood_electrical_engineering.yaml | 8 + ..._loglikelihood_elementary_mathematics.yaml | 8 + .../mmlu_loglikelihood_formal_logic.yaml | 8 + .../mmlu_loglikelihood_global_facts.yaml | 8 + ...mlu_loglikelihood_high_school_biology.yaml | 8 + ...u_loglikelihood_high_school_chemistry.yaml | 8 + ...kelihood_high_school_computer_science.yaml | 8 + ...kelihood_high_school_european_history.yaml | 8 + ...u_loglikelihood_high_school_geography.yaml | 8 + ...d_high_school_government_and_politics.yaml | 8 + ...likelihood_high_school_macroeconomics.yaml | 8 + ...loglikelihood_high_school_mathematics.yaml | 8 + ...likelihood_high_school_microeconomics.yaml | 8 + ...mlu_loglikelihood_high_school_physics.yaml | 8 + ..._loglikelihood_high_school_psychology.yaml | 8 + ..._loglikelihood_high_school_statistics.yaml | 8 + ..._loglikelihood_high_school_us_history.yaml | 8 + ...glikelihood_high_school_world_history.yaml | 8 + .../mmlu_loglikelihood_human_aging.yaml | 8 + .../mmlu_loglikelihood_human_sexuality.yaml | 8 + .../mmlu_loglikelihood_international_law.yaml | 8 + .../mmlu_loglikelihood_jurisprudence.yaml | 7 + .../mmlu_loglikelihood_logical_fallacies.yaml | 8 + .../mmlu_loglikelihood_machine_learning.yaml | 8 + .../mmlu_loglikelihood_management.yaml | 7 + .../mmlu_loglikelihood_marketing.yaml | 7 + .../mmlu_loglikelihood_medical_genetics.yaml | 8 + .../mmlu_loglikelihood_miscellaneous.yaml | 7 + .../mmlu_loglikelihood_moral_disputes.yaml | 8 + .../mmlu_loglikelihood_moral_scenarios.yaml | 8 + .../mmlu_loglikelihood_nutrition.yaml | 7 + .../mmlu_loglikelihood_philosophy.yaml | 7 + .../mmlu_loglikelihood_prehistory.yaml | 7 + ...loglikelihood_professional_accounting.yaml | 8 + .../mmlu_loglikelihood_professional_law.yaml | 8 + ...u_loglikelihood_professional_medicine.yaml | 8 + ...loglikelihood_professional_psychology.yaml | 8 + .../mmlu_loglikelihood_public_relations.yaml | 8 + .../mmlu_loglikelihood_security_studies.yaml | 8 + .../mmlu_loglikelihood_sociology.yaml | 7 + .../mmlu_loglikelihood_us_foreign_policy.yaml | 8 + .../mmlu_loglikelihood_virology.yaml | 7 + .../mmlu_loglikelihood_world_religions.yaml | 8 + .../flan_n_shot/mmlu_machine_learning.yaml | 8 + .../mmlu/flan_n_shot/mmlu_management.yaml | 7 + .../mmlu/flan_n_shot/mmlu_marketing.yaml | 7 + .../flan_n_shot/mmlu_medical_genetics.yaml | 8 + .../mmlu/flan_n_shot/mmlu_miscellaneous.yaml | 7 + .../mmlu/flan_n_shot/mmlu_moral_disputes.yaml | 8 + .../flan_n_shot/mmlu_moral_scenarios.yaml | 8 + .../mmlu/flan_n_shot/mmlu_nutrition.yaml | 7 + .../mmlu/flan_n_shot/mmlu_philosophy.yaml | 7 + .../mmlu/flan_n_shot/mmlu_prehistory.yaml | 7 + .../mmlu_professional_accounting.yaml | 8 + .../flan_n_shot/mmlu_professional_law.yaml | 8 + .../mmlu_professional_medicine.yaml | 8 + .../mmlu_professional_psychology.yaml | 8 + .../flan_n_shot/mmlu_public_relations.yaml | 8 + .../flan_n_shot/mmlu_security_studies.yaml | 8 + .../mmlu/flan_n_shot/mmlu_sociology.yaml | 7 + .../flan_n_shot/mmlu_us_foreign_policy.yaml | 8 + .../tasks/mmlu/flan_n_shot/mmlu_virology.yaml | 7 + .../flan_n_shot/mmlu_world_religions.yaml | 8 + 235 files changed, 5082 insertions(+), 6 deletions(-) create mode 100644 lm_eval/tasks/mmlu/_cot_prompts.json rename lm_eval/tasks/mmlu/{gen_all_splits.py => _generate_configs.py} (76%) rename lm_eval/tasks/mmlu/{ => default}/hendrycks_test_original_default.yaml (100%) create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_abstract_algebra.yaml 
create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_computer_security.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_european_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_geography.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_government_and_politics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_macroeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_microeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_statistics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_us_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_world_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_aging.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_sexuality.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_international_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_jurisprudence.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_logical_fallacies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_machine_learning.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_management.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_marketing.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_medical_genetics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_miscellaneous.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_disputes.yaml create mode 100644 
lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_scenarios.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_nutrition.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_philosophy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_prehistory.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_accounting.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_public_relations.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_security_studies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_sociology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_us_foreign_policy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_virology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_world_religions.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml create mode 100644 
lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_sexuality.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_nutrition.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml 
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml create mode 100644 
lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml create mode 100644 
lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml diff --git a/lm_eval/tasks/mmlu/_cot_prompts.json b/lm_eval/tasks/mmlu/_cot_prompts.json new file mode 100644 index 00000000..4714567a --- /dev/null +++ b/lm_eval/tasks/mmlu/_cot_prompts.json @@ -0,0 +1 @@ +{"abstract_algebra": "The following are multiple choice questions (with answers) about abstract algebra.\n\nQ: Statement 1 | Every element of a group generates a cyclic subgroup of the group. Statement 2 | The symmetric group S_10 has 10 elements.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. A cyclic group is a group that is generated by a single element. Hence a subgroup generated by a single element of a group is cyclic and Statement 1 is True. The answer is (C).\n\nQ: The symmetric group $S_n$ has $\factorial{n}$ elements, hence it is not true that $S_{10}$ has 10 elements.\nFind the characteristic of the ring 2Z.\n(A) 0 (B) 3 (C) 12 (D) 30\nA: Let's think step by step. A characteristic of a ring is R is $n$ if the statement $ka = 0$ for all $a\in 2Z$ implies that $k$ is a multiple of $n$. Assume that $ka = 0$ for all $a\in 2Z$ for some $k$. In particular $2k = 0$. Hence $k=0$ and $n=0$. The answer is (A).\n\nQ: Statement 1| Every function from a finite set onto itself must be one to one. Statement 2 | Every subgroup of an abelian group is abelian.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. Statement 1 is true. Let $S$ be a finite set. If $f:S \rightarrow S$ is a onto function, then $|S| = |f(S)|$. If $f$ was not one to one, then for finite domain $S$ the image would have less than $S$ elements, a contradiction.\nStatement 2 is true. Let $G$ be an abelian group and $H$ be a subgroup of $G$. We need to show that $H$ is abelian. Let $a,b \in H$.
Then $a,b \\in G$ and $ab=ba$. Since $G$ is abelian, $ab=ba$. Since $H$ is a subgroup of $G$, $ab \\in H$. Therefore, $ab=ba$ and $H$ is abelian. The answer is (A).\n\nQ: Statement 1 | If aH is an element of a factor group, then |aH| divides |a|. Statement 2 | If H and K are subgroups of G then HK is a subgroup of G.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. Statement 2 is false. Let $H$ be a subgroup of $S_3$ generated by the cycle $(1,2)$ and $K$ be a subgroup of $S_3$ generated by the cycle $(1,3)$. Both $H$ and $K$ have two elements, the generators and the identity. However $HK$ contains cycles (1,2), (1,3) and (2,3,1), but the inverse of (2,3,1) is (2,1,3) and it does not belong to HK, hence HK is not a subgroup. The answer is (B).\n\nQ: Find all c in Z_3 such that Z_3[x]/(x^2 + c) is a field.\n(A) 0 (B) 1 (C) 2 (D) 3\nA: Let's think step by step. Z_3[x]/(x^2 + c) is a field if and only if x^2 + c does not have roots in Z_3. That is x^2 + c != 0 for every x in Z_3. If c = 0, then x^2 + c = x^2 has root 0. If c = 1 then x^2 + c = x^2 + 1 = 0 + 1 for x = 0, 1 + 1 = 2 for x = 1 and 1 + 1 = 2 for x = 2, hence x^2 + 1 does not have any roots. For c = 2 the polynomial x^2 + 2 has two roots at x = 1 and x = 2. Hence Z_3[x]/(x^2 + c) is a field if and only if c = 1. The answer is (B).", "anatomy": "The following are multiple choice questions (with answers) about anatomy.\n\nQ: Which of the following is the body cavity that contains the pituitary gland?\n(A) Abdominal (B) Cranial (C) Pleural (D) Spinal\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. The pituitary gland is the major endocrine gland attached to the base of the brain, and it is contained in the Cranial cavity. The answer is (B).\n\nQ: Which of these branches of the trigeminal nerve contain somatic motor processes?\n(A) The supraorbital nerve (B) The infraorbital nerve (C) The mental nerve (D) None of the above\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. \nWe know the following: (A) The supraorbital nerve (also known as the frontal nerve) is the largest branch of the ophthalmic nerve and branch of ophthalmic division of the trigeminal nerve. (B) The infraorbital nerve is a branch of the maxillary division of the trigeminal nerve. (C) The mental nerve is a branch of the mandibular division of the trigeminal nerve. Because all these nerves are purely sensory nerves and do not contain any somatic motor processes. Therefore, the answer should be none of the above, which is (D). The answer is (D).\n\nQ: In Angle's Class II Div 2 occlusion there is\n(A) excess overbite of the upper lateral incisors. (B) negative overjet of the upper central incisors. (C) excess overjet of the upper lateral incisors. (D) excess overjet of the upper central incisors.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. This is a question related to anatomy and orthodontics. Excess overjet is associated with Class II occlusions; therefore, we can safely eliminate (B) from the list, as negative overjet is often associated with Class III occlusions. Now, we need to determine the location of the excess overjet, and that would be the upper (maxillary) lateral incisors. Only (C) has the correct information. The answer is (C).\n\nQ: The pleura\n(A) have no sensory innervation. 
(B) are separated by a 2 mm space. (C) extend into the neck. (D) are composed of respiratory epithelium.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. First, recall that the pleura refers to the thin layer of tissue that covers the lungs and lines the interior wall of the chest cavity. Now, let\u2019s look at each option:\nOption (A): \u201cThe pleura have no sensory innervation.\u201d This information is not correct. The pleura do have a sensory innervation.\nOption (B): \u201cThe pleura are separated by a 2 mm space.\u201d This information is not correct. There is a very thin \u201cpotential\u201d space between the layers of the pleura; however, it is typically filled with serous pleural fluid. \nOption (C): \u201cThe pleura extend into the neck.\u201d This information is actually true. The cervical pleura, also known as the dome of the pleura, lines the extension of the pleural cavity into the neck.\nOption (D): \u201cThe pleura are composed of respiratory epithelium.\u201d This information is not correct. The pleura are composed of connective tissue (CT).\nBecause (A), (B), and (D) are all incorrect, (C) is the only correct answer. The answer is (C).\n\nQ: What is the embryological origin of the hyoid bone?\n(A) The first pharyngeal arch (B) The first and second pharyngeal arches (C) The second pharyngeal arch (D) The second and third pharyngeal arches\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. The hyoid bone, which is also known as the hyoid, is a small U-shaped bone located in the anterior neck. In its resting position, it lies between the base of the mandible and the third cervical vertebrae. We know that the second and the third pharyngeal arches give rise to the horns of the hyoid bone; therefore, the embryological origin of the hyoid bone are the second and the third pharyngeal arches\u2014this information is covered in the last option (D). Therefore, we conclude that (D) must be the correct answer. The answer is (D).", "astronomy": "The following are multiple choice questions (with answers) about astronomy.\n\nQ: Where do most short-period comets come from and how do we know?\n(A) The Kuiper belt; short period comets tend to be in the plane of the solar system just like the Kuiper belt. (B) The Kuiper belt; short period comets tend to come from random directions indicating a spherical distribution of comets called the Kuiper belt. (C) The asteroid belt; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the asteroid belt. (D) The Oort cloud; short period comets tend to be in the plane of the solar system just like the Oort cloud.\nA: Let's think step by step. Most short-period comets come from the Kuiper belt, and we know because short period comets tend to be in the plane of the solar system, just like the Kuiper belt is. The answer is (A).\n\nQ: You are pushing a truck along a road. Would it be easier to accelerate this truck on Mars? Why? (Assume there is no friction)\n(A) It would be harder since the truck is heavier on Mars. (B) It would be easier since the truck is lighter on Mars. (C) It would be harder since the truck is lighter on Mars. (D) It would be the same no matter where you are.\nA: Let's think step by step.
If we assume that there is no friction, the force needed to accelerate the truck is by Newton\u2019s second law only dependent on the mass of the truck. Hence (A), (B) and (C) are incorrect since it doesn\u2019t matter that it\u2019s on Mars, and (D) is the correct answer. The answer is (D).\n\nQ: Say the pupil of your eye has a diameter of 5 mm and you have a telescope with an aperture of 50 cm. How much more light can the telescope gather than your eye?\n(A) 10000 times more (B) 100 times more (C) 1000 times more (D) 10 times more\nA: Let's think step by step. The amount of light is proportional to the aperture area $A = \\pi D^2/4$ for a lens with diameter $D$, so the relative amounts of light between the eye with diameter 5mm and the telescope with diameter 50mm is $(50 cm)^2/(5mm)^2 = 10000$. The answer is (A).\n\nQ: Why isn't there a planet where the asteroid belt is located?\n(A) A planet once formed here but it was broken apart by a catastrophic collision. (B) There was not enough material in this part of the solar nebula to form a planet. (C) There was too much rocky material to form a terrestrial planet but not enough gaseous material to form a jovian planet. (D) Resonance with Jupiter prevented material from collecting together to form a planet.\nA: Let's think step by step. The asteroid belt is a stellar disc consisting of a large number of asteroids between Mars and Jupiter's orbits. The asteroids in this belt are affected by the gravitational pull from both other asteroids and nearby planets. Due to the strong gravitational force of Jupiter there are resonances that give rise to low density regions of asteroids known as the Kirkwood gap. So (B) and (C) are not correct since it\u2019s not a lack of material that prevents a planet from being formed, and (A) is incorrect because the Kirkwood gap would have prevented a planet from forming in the first place, and (D) is the correct option. The answer is (D).\n\nQ: Why is Mars red?\n(A) Because the surface is covered with heavily oxidized (\"rusted\") minerals. (B) Because the atmosphere scatters more light at bluer wavelengths transmitting mostly red light. (C) Because Mars is covered with ancient lava flows which are red in color. (D) Because flowing water on Mars's surface altered the surface minerals several billion years ago.\nA: Let's think step by step. Option (B) is not correct because if the red color was caused by the scattering off the atmosphere, then the earth with a much thicker atmosphere would also look red. Options (C) and (D) are not specific enough about why the color of the surface would be red, while (A) is correct because it explains that the surface is red due to the rusted materials on the surface and the red color comes from the rust. So the correct option is (A). The answer is (A).", "business_ethics": "The following are multiple choice questions (with answers) about business ethics.\n\nQ: In contrast to _______, _______ aim to reward favourable behaviour by companies. The success of such campaigns have been heightened through the use of ___________, which allow campaigns to facilitate the company in achieving _________ .\n(A) Buycotts, Boycotts, Blockchain technology, Charitable donations (B) Buycotts, Boycotts, Digital technology, Increased Sales (C) Boycotts, Buyalls, Blockchain technology, Charitable donations (D) Boycotts, Buycotts, Digital technology, Increased Sales\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. 
The sentence that best uses the possible options above is \u201cIn contrast to *boycotts*, *buycotts* aim to reward favourable behavior by companies. The success of such campaigns have been heightened through the use of *digital technology*, which allow campaigns to facilitate the company in achieving *increased sales*.\u201d The answer is (D).\n\nQ: _______ is the direct attempt to formally or informally manage ethical issues or problems, through specific policies, practices and programmes.\n(A) Corporate social responsibility (B) Business ethics management (C) Sustainability (D) Environmental management\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The direct attempt to manage ethical issues through specific policies, practices, and programs is business ethics management. The answer is (B).\n\nQ: Three contrasting tactics that CSO's can engage in to meet their aims are ________ which typically involves research and communication, ________, which may involve physically attacking a company's operations or ________, often involving some form of _______.\n(A) Non-violent direct action, Violent direct action, Indirect action, Boycott (B) Indirect action, Instrumental action, Non-violent direct action, Information campaign (C) Indirect action, Violent direct action, Non-violent direct-action Boycott (D) Non-violent direct action, Instrumental action, Indirect action, Information campaign\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cThree contrasting tactics that CSO's can engage in to meet their aims are *indirect action*, which typically involves research and communication, *violent direct action*, which may involve physically attacking a company's operations or *non-violent direct action*, often involving some form of *boycott*.\u201d The answer is (C).\n\nQ: To ensure the independence of the non-executive board members, there are a number of steps which can be taken, which include non-executives being drawn from _______ the company, being appointed for a _________ time period as well as being appointed _________.\n(A) Outside, Limited, Independently (B) Inside, Limited, Intermittently (C) Outside, Unlimited, Intermittently (D) Inside, Unlimited, Independently\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cTo ensure the independence of the non-executive board members, there are a number of steps which can be taken, which include non-executives being drawn from *outside* the company, being appointed for a *limited* time period as well as being appointed *independently*. The answer is (A).\n\nQ: Beyond the business case for engaging in CSR there are a number of moral arguments relating to: negative _______, the _______that corporations possess and the ________ of business and society.\n(A) Externalities, Power, Independence (B) Publicity, Insubstantial resources, Mutual dependence (C) Publicity, Power, Independence (D) Externalities, Power, Mutual dependence\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cBeyond the business case for engaging in CSR there are a number of moral arguments relating to: negative *externalities*, the *power* that corporations possess and the *mutual dependence* of business and society.
The answer is (D).", "clinical_knowledge": "The following are multiple choice questions (with answers) about clinical knowledge.\n\nQ: Glycolysis is the name given to the pathway involving the conversion of:\n(A) glycogen to glucose-1-phosphate. (B) glycogen or glucose to fructose. (C) glycogen or glucose to pyruvate or lactate. (D) glycogen or glucose to pyruvate or acetyl CoA.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. Glycolysis is the name given to the pathway involving conversion of glycogen or glucose to pyruvate or lactate. The answer is (C).\n\nQ: What is the difference between a male and a female catheter?\n(A) Male and female catheters are different colours. (B) Male catheters are longer than female catheters. (C) Male catheters are bigger than female catheters. (D) Female catheters are longer than male catheters.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. The difference between a male and female catheter is that male catheters tend to be longer than female catheters. The answer is (B).\n\nQ: How many attempts should you make to cannulate a patient before passing the job on to a senior colleague, according to the medical knowledge of 2020?\n(A) 4 (B) 3 (C) 2 (D) 1\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. According to the medical protocol as of 2020, you should make two attempts to cannulate a patient before passing the job on to a more-senior practitioner. The answer is (C).\n\nQ: In the assessment of the hand function which of the following is true?\n(A) Abduction of the thumb is supplied by spinal root T2 (B) Opposition of the thumb by opponens policis is supplied by spinal root T1 (C) Finger adduction is supplied by the median nerve (D) Finger abduction is mediated by the palmar interossei\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. Of all the options, it is only true that the opposition of the thumb by opponens pollicis is supplied by spinal root T1. The answer is (B).\n\nQ: The energy for all forms of muscle contraction is provided by:\n(A) ATP. (B) ADP. (C) phosphocreatine. (D) oxidative phosphorylation.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. The energy for muscular contraction is provided by ATP (adenosine triphosphate), which is the powerhouse of the cell. The answer is (A).", "college_biology": "The following are multiple choice questions (with answers) about college biology.\n\nQ: Which of the following represents an accurate statement concerning arthropods?\n(A) They possess an exoskeleton composed primarily of peptidoglycan. (B) They possess an open circulatory system with a dorsal heart. (C) They are members of a biologically unsuccessful phylum incapable of exploiting diverse habitats and nutrition sources. (D) They lack paired, jointed appendages.\nA: Let's think step by step. Peptidoglycan is known to comprise the plasma membrane of most bacteria, rather than the exoskeleton of arthropods, which is made of chitin, which rules out (A). The answer (C) is false because arthropods are a highly successful phylum. Likewise, arthropods have paired, jointed appendages, which rules out (D). The only remaining option is (B), as arthropods have an open circulatory system with a dorsal tubular heart. 
The answer is (B).\n\nQ: In a given population, 1 out of every 400 people has a cancer caused by a completely recessive allele, b. Assuming the population is in Hardy-Weinberg equilibrium, which of the following is the expected proportion of individuals who carry the b allele but are not expected to develop the cancer?\n(A) 1/400 (B) 19/400 (C) 20/400 (D) 38/400\nA: Let's think step by step. According to the Hardy Weinberg Law, $p^2 + 2 p q + q^2 = 1$, and $p + q = 1$ where $p$ is the frequency of the dominant allele, $q$ is the frequency of the recessive allele, and $p^2$, $q^2$, and $2pq$ are the frequencies of dominant homozygous, recessive homozygous, and heterozygous individuals, respectively. \u200bThe frequency of the recessive allele (q) is $\\sqrt{\frac{1}{400}} = 0.05$. We have $p = 1 - q = 0.95$. The frequency of heterozygous individuals is $2pq = 2 \\cdot 0.05 \\cdot 0.95 = 0.095$. The number of heterozygous individuals is equal to the frequency of heterozygous individuals times the size of the population, or $0.095 * 400 = 38$. So we end up with 38/400. The answer is (D).\n\nQ: According to the pressure-flow model of movement of phloem contents, photosynthate movement from source to sink is driven by\n(A) an ATP-dependent pressure-flow pump (B) a water-pressure potential gradient (C) transpiration (D) apoplastic diffusion\nA: Let's think step by step. It is a gradient in water pressure that induces the movement of phloem content, which refers to answer (B). The mechanism of movement does not rely on metabolism, which rules out (A). Transpiration refers to the exhalation of water vapor through plant stomata, and is also not related, which rules out (C). While the apoplastic pathway is one of two main pathways for water transport in plants, it is not central to the pressure flow model, which rules out (D). The answer is (B).\n\nQ: Which of the following contain DNA sequences required for the segregation of chromosomes in mitosis and meiosis?\n(A) Telomeres (B) Centromeres (C) Nucleosomes (D) Spliceosomes\nA: Let's think step by step. The genetic material in Telomeres is not used, which rules out (A). Nucleosomes are the repeating subunit that comprises chromatin packed in a cell nucleus, and do not specifically refer to DNA sequences necessary for segregating chromosomes in cell division, which rules out (C). A spliceosome is a large ribonucleoprotein that removes introns from transcribed pre-mRNA rather than governing chromosome segregation. Centromeres are directly responsible for segregating chromosomes in cell division. The answer is (B).\n\nQ: The presence of homologous structures in two different organisms, such as the humerus in the front limb of a human and a bird, indicates that\n(A) the human and bird are polyphyletic species (B) a human's and bird's evolution is convergent (C) the human and bird belong to a clade (D) the human and bird developed by analogy\nA: Let's think step by step. Polyphyletic species are organisms that are grouped due to having similar characteristics but which do not have a common ancestor. This is not the case for humans and birds, which rules out (A). Convergent evolution refers to the indepdendent development of similar features in different species at different periods, which is also not the case for humans and birds, which rules out (B). Analogy refers to the superficial resemblance of structures that have different origins, which is not the case for the human and bird forearms, which rules out (D). 
Humans and birds do belong to the same clade - a group of organisms composed of a common ancestor. The answer is (C).", "college_chemistry": "The following are multiple choice questions (with answers) about college chemistry.\n\nQ: 3 Cl\u2212(aq) + 4 CrO_4^2\u2212(aq) + 23 H+(aq) \u2192 3 HClO2(aq) + 4 Cr3+(aq) + 10 H2O(l). In the reaction shown above, Cl\u2212(aq) behaves as\n(A) an acid (B) a base (C) a catalyst (D) a reducing agent\nA: Let's think step by step. A molecule that behaves as a base accepts an H+ ion (or proton) from another molecule, whereas a molecule that behaves as an acid donates an H+ ion (or proton) to another molecule. Neither of these is the case for Cl in this reaction, which rules out (A) and (B). A catalyst is a substance that only accelerates a reaction without itself undergoing chemical change, which is not the case here. This rules out (C). Instead, the $Cl^{-}$ molecules carry a negative charge, which they donate in the reaction to form 3 HClO2. This is the behavior of a reducing agent, or (D). The answer is (D).\n\nQ: Which of the following statements about the lanthanide elements is NOT true?\n(A) The most common oxidation state for the lanthanide elements is +3. (B) Lanthanide complexes often have high coordination numbers (> 6). (C) All of the lanthanide elements react with aqueous acid to liberate hydrogen. (D) The atomic radii of the lanthanide elements increase across the period from La to Lu.\nA: Let's think step by step. The atomic radii of the lanthanide elements in fact decrease across the period from La to Lu. Options (A), (B), and (C) are all true. This means that only (D) is NOT true. The answer is (D).\n\nQ: Which of the following lists the hydrides of group-14 elements in order of thermal stability, from lowest to highest?\n(A) PbH4 < SnH4 < GeH4 < SiH4 < CH4 (B) PbH4 < SnH4 < CH4 < GeH4 < SiH4 (C) CH4 < SiH4 < GeH4 < SnH4 < PbH4 (D) CH4 < PbH4 < GeH4 < SnH4 < SiH4\nA: Let's think step by step. The thermal stability of group-14 hydrides decreases as we move from the top of group 14 to the bottom. The order of elements in the group from top to bottom is C, Si, Ge, Sn, Pb. Therefore in order of increasing thermal stability we have PbH4, SnH4, GeH4, SiH4, and CH4, or answer (A). The answer is (A).\n\nQ: Predict the number of lines in the EPR spectrum of a solution of 13C-labelled methyl radical (13CH3\u2022), assuming the lines do not overlap.\n(A) 4 (B) 3 (C) 6 (D) 24 (E) 8\nA: Let's think step by step. The electron paramagnetic resonance spectrum will be split by two forms of interactions. The first is the hyperfine interaction with the 13C (nuclear spin $I = \frac{1}{2}$) which will split the spectrum into 2 lines. This will be further split into 4 lines by the interaction with three equivalent 1H nuclei. The total number of lines is therefore $2 \cdot 4 = 8$. The answer is (E).", "college_computer_science": "The following are multiple choice questions (with answers) about college computer science.\n\nQ: Which of the following regular expressions is equivalent to (describes the same set of strings as) (a* + b)*(c + d)?\n(A) a*(c + d)+ b(c + d)\n(B) a*(c + d)* + b(c + d)*\n(C) a*(c + d)+ b*(c + d)\n(D) (a + b)*c +(a + b)*d\nA: Let's think step by step. We know that:\n1. (X* + Y)* = (X + Y)*\n2. X(Y + Z)? = XY + XZ\nUsing equation 1 we can rewrite (a* + b)*(c + d)? as:\n3.
(a + b)*(c + d)?\nUsing equation 2 we can rewrite equation 3 as:\n(a + b)*c + (a + b)*d The answer is (D).\n\nQ: The Singleton design pattern is used to guarantee that only a single instance of a class may be instantiated. Which of the following is (are) true of this design pattern?\nI. The Singleton class has a static factory method to provide its instance.\nII. The Singleton class can be a subclass of another class.\nIII. The Singleton class has a private constructor.\n(A) I only\n(B) II only\n(C) III only\n(D) I, II, and III\nA: Let's think step by step. Statement I is a correct statement about a Singleton, because a Singleton restricts instantiation to a single, static method. Statement II is also correct, because there is no inherent restriction regarding the inheritance of a Singleton. Statement III is also correct, because a Singletons must be instantiated only once, so its constructor is made private to prevent any construction except via its static factory method.\nGiven these facts, statements I, II, and III are all correct. The answer is (D).\n\nQ: A certain pipelined RISC machine has 8 general-purpose registers R0, R1, . . . , R7 and supports the following operations:\nADD Rs1, Rs2, Rd (Add Rs1 to Rs2 and put the sum in Rd)\nMUL Rs1, Rs2, Rd (Multiply Rs1 by Rs2 and put the product in Rd)\nAn operation normally takes one cycle; however, an operation takes two cycles if it produces a result required by the immediately following operation in an operation sequence.\nConsider the expression AB + ABC + BC, where variables A, B, C are located in registers R0, R1, R2. If the contents of these three registers must not be modified, what is the minimum number of clock cycles required for an operation sequence that computes the value of AB + ABC + BC?\n(A) 5 (B) 6 (C) 7 (D) 8\nA: Let's think step by step. First, we are given that A is in R0, B is in R1, and C is in R2.\nNext, we can see that we must compute three multiplies (AB, BC, and ABC) and two adds (AB + ABC, (AB + ABC) + BC) to compute our final answer, resulting in a minimum of five clock cycles.\nNext, we can see that there is no way to avoid at least one pipeline stall when computing our final answer, because to compute our final sum we must wait at least one cycle for the results from the previous stage to be ready. Thus, our minimum number of cycles must be 6.\nWe can verify that we can create a solution that requires only six cycles as follows:\ncompute AB: MUL R0, R1, R3\ncompute BC: MUL R1, R2, R4\ncompute ABC: MUL R3, R4, R5\ncompute AB + BC: ADD R3, R4, R6\nSTALL\ncompute AB + ABC + BC: ADD R5, R6, R7\nSo there are 6 cycles. The answer is (B).\n\nQ: A compiler generates code for the following assignment statement.\nG := (A + B) * C - (D + E) * F\nThe target machine has a single accumulator and a single-address instruction set consisting of instructions load, store, add, subtract, and multiply. For the arithmetic operations, the left operand is taken from the accumulator and the result appears in the accumulator. The smallest possible number of instructions in the resulting code is\n(A) 5 (B) 6 (C) 7 (D) 9\nA: Let's think step by step. We can compute the final answer with the following sequence of operations:\n1. LOAD D (accumulator = D)\n2. ADD E (accumulator = D+E)\n3. MUL F (accumulator = (D+E)*F)\n4. STORE X (X = (D+E)*F)\n5. LOAD A (accumulator = A)\n6. ADD B (accumulator = A+B)\n7. MUL C (accumulator = (A+B)*C)\n8. SUB X (accumulator = (A+B)*C - (D+E)*F)\n9. 
STORE G (G = (A+B)*C - (D+E)*F)\nThis sequence takes 9 instructions. The answer is (D).\n\nQ: Consider a computer design in which multiple processors, each with a private cache memory, share global memory using a single bus. This bus is the critical system resource. Each processor can execute one instruction every 500 nanoseconds as long as memory references are satisfied by its local cache. When a cache miss occurs, the processor is delayed for an additional 2,000 nanoseconds. During half of this additional delay, the bus is dedicated to serving the cache miss. During the other half, the processor cannot continue, but the bus is free to service requests from other processors. On average, each instruction requires 2 memory references. On average, cache misses occur on 1 percent of references. What proportion of the capacity of the bus would a single processor consume, ignoring delays due to competition from other processors?\n(A) 1/50 (B) 1/27 (C) 1/25 (D) 2/27\nA: Let's think step by step. We know that each instruction requires two memory references per instruction, and that there is an average cache miss rate of one percent.\nThus a given processor has:\n(1 cache miss / 100 references) * (2 references / instruction) =\n(2 cache misses / 100 instructions), so:\nmisses_per_instruction = 1 cache miss / 50 instructions.\nNext, we know that each instruction requires 500 nanoseconds when there is no cache miss, and 500 + 2000 = 2500 nanoseconds when there is a cache miss. Thus:\n50 instructions take (49 * 500) + (1 * 2500) = 27000 nanoseconds, so:\ninstructions_per_ns = 50 instructions / 27000 nanoseconds.\nNow, we know that each cache miss locks the bus for half of the 2000 nanosecond cache miss delay, or 1000 nanoseconds, so:\nlock_ns_per_miss = 1000 nanoseconds / cache miss.\nThus we can see that on average a single processor will lock the bus for:\nlock_ns_per_miss * misses_per_instruction * instructions_per_ns =\n(1000 nanoseconds / cache miss) * (1 cache miss / 50 instructions) * (50 instructions / 27000 nanoseconds) = 1000 * (1/50) * (50/27000) = 1000/27000 = 1/27. The answer is (B).", "college_mathematics": "The following are multiple choice questions (with answers) about college mathematics.\n\nQ: Let V be the set of all real polynomials p(x). Let transformations T, S be defined on V by T:p(x) -> xp(x) and S:p(x) -> p'(x) = d/dx p(x), and interpret (ST)(p(x)) as S(T(p(x))). Which of the following is true?\n(A) ST = 0 (B) ST = T (C) ST = TS (D) ST - TS is the identity map of V onto itself.\nA: Let's think step by step. For a given polynomial $p$ we have\n\\[ST(p) = (xp(x))\u2019 = p(x) + xp\u2019(x)\\]\nand\n\\[TS(p) = xp\u2019(x).\\]\nHence \\[ST(p) - TS(p) = p(x) + xp\u2019(x) - xp\u2019(x) = p(x),\\] so ST - TS is the identity map. The answer is (D).\n\nQ: Suppose that f(1 + x) = f(x) for all real x. If f is a polynomial and f(5) = 11, then f(15/2)\n(A) -11 (B) 0 (C) 11 (D) 33/2\nA: Let's think step by step. The only polynomial so that $f(1 + x) = f(x)$ is a constant polynomial. Hence $f(5) = 11 = f(15/2)$. The answer is (C).\n\nQ: Let A be a real 2x2 matrix. Which of the following statements must be true?\nI. All of the entries of A^2 are nonnegative.\nII. The determinant of A^2 is nonnegative.\nIII. If A has two distinct eigenvalues, then A^2 has two distinct eigenvalues.\n(A) I only (B) II only (C) III only (D) II and III only\nA: Let's think step by step. We have \\[ det(A^2) = (det(A))^2 \\geq 0,\\] hence II holds.\nIII is false: as a counterexample take a diagonal matrix with -1 and 1 on the diagonal. 
Then $A^2$ is the identity matrix. The answer is (B).\n\nQ: Let A be the set of all ordered pairs of integers (m, n) such that 7m + 12n = 22. What is the greatest negative number in the set B = {m + n : (m, n) \\in A}?\n(A) -5 (B) -4 (C) -3 (D) -2\nA: Let's think step by step. We have 12n = 22 - 7m and one of the solutions is $m = -2$, $n = 3$. Then $m + n = 1$, hence we need to look for smaller $m$ in order to make $m + n$ negative. The next solution is $m = -14$ and $n = 10$. For smaller $m$ we have $m + n$ smaller than $-4$. The answer is (B).\n\nQ: A tank initially contains a salt solution of 3 grams of salt dissolved in 100 liters of water. A salt solution containing 0.02 grams of salt per liter of water is sprayed into the tank at a rate of 4 liters per minute. The sprayed solution is continually mixed with the salt solution in the tank, and the mixture flows out of the tank at a rate of 4 liters per minute. If the mixing is instantaneous, how many grams of salt are in the tank after 100 minutes have elapsed?\n(A) 2 (B) 2 - e^-2 (C) 2 + e^-2 (D) 2 + e^-4\nA: Let's think step by step. For all $t \\in \\mathbb{R}$, let $s(t)$ denote the number of grams of salt in the tank at the $t$ minute mark. Then $s(0) = 3$.\nWe use $s$ and $s(t)$ interchangeably. We also use $s^{\\prime}$ and $s^{\\prime}(t)$ interchangeably. The solution sprayed into the tank adds $(0.02) 4=2 / 25$ grams of salt per minute. There are always 100 liters of liquid in the tank, containing $s$ grams of salt. So the density of salt in the tank is $s / 100$ grams per liter. The flow of water out of the tank therefore subtracts $4(s / 100)=s / 25$ grams of salt per minute. Then, for all $t \\in \\mathbb{R}$, we have $s^{\\prime}(t)=(2 / 25)-(s / 25)=(2-s) / 25$, and so $[s(t)=2] \\Rightarrow\\left[s^{\\prime}(t)=0\\right]$. For all $t \\in \\mathbb{R}$,\n$$\n\\frac{d}{d t}[\\ln (s-2)]=\\frac{s^{\\prime}}{s-2}=\\frac{-1}{25}=\\frac{d}{d t}\\left[-\\frac{t}{25}\\right] .\n$$\nChoose $C \\in \\mathbb{R}$ such that, for all $t \\in \\mathbb{R}, \\ln ((s(t)-2))=-[t / 25]+C$. Let $K:=e^{C}$. Then, for all $t \\in \\mathbb{R}$, we have $(s(t))-2=K e^{-t / 25}$, and so $s(t)=2+K e^{-t / 25}$. Then $3=s(0)=2+K e^{0}=2+K$, so $K=1$. Then $s(100)=2+K e^{-100 / 25}=2+1 \\cdot e^{-4}=2+e^{-4}$. The answer is (D).", "college_medicine": "The following are multiple choice questions (with answers) about college medicine.\n\nQ: An expected side effect of creatine supplementation is:\n(A) muscle weakness. (B) gain in body mass. (C) muscle cramps. (D) loss of electrolytes.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Creatine is a dietary supplement that results in body mass gain. The answer is (B).\n\nQ: Which of the following is not a true statement?\n(A) Muscle glycogen is broken down enzymatically to glucose-1-phosphate (B) Elite endurance runners have a high proportion of Type I fibres in their leg muscles (C) Liver glycogen is important in the maintenance of the blood glucose concentration (D) Insulin promotes glucose uptake by all tissues in the body\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. 
Let\u2019s solve this step by step and go over each choice: \n(A) \u201cMuscle glycogen is broken down enzymatically to glucose-1-phosphate\u201d: This is a correct statement.\n(B) \u201cElite endurance runners have a high proportion of Type I fibres in their leg muscles\u201d: This is a correct statement.\n(C) \u201cLiver glycogen is important in the maintenance of the blood glucose concentration\u201d: This is a correct statement. \n(D) \u201cInsulin promotes glucose uptake by all tissues in the body\u201d: This is not a correct statement, because insulin promotes glucose uptake by the liver, adipose tissue, and muscle, but not all tissues. For instance, the tissues in the brain and red blood cells are not affected by insulin. The answer is (D).\n\nQ: A high school science teacher fills a 1 liter bottle with pure nitrogen and seals the lid. The pressure is 1.70 atm, and the room temperature is 25\u00b0C. Which two variables will both increase the pressure of the system, if all other variables are held constant?\n(A) Increasing temperature, increasing moles of gas (B) Increasing temperature, increasing volume (C) Decreasing volume, decreasing temperature (D) Decreasing moles of gas, increasing volume\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The relevant equation for this is the ideal gas law: PV=nRT. To increase the pressure of the system (P), then either n (number of moles of the gas) or T (temperature) have to increase. The answer is (A).\n\nQ: In a genetic test of a newborn, a rare genetic disorder is found that has X-linked recessive transmission. Which of the following statements is likely true regarding the pedigree of this disorder?\n(A) All descendants on the maternal side will have the disorder. (B) Females will be approximately twice as affected as males in this family. (C) All daughters of an affected male will be affected. (D) There will be equal distribution of males and females affected.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Let\u2019s solve this step by step. Let's recall first that females have two X chromosomes, while males have one X and one Y chromosome. This is an important fact we need to know before answering this question. \nBecause a male can only pass his only one X chromosome to a daughter, if he is affected by this rare genetic disorder, then we know for sure that he will pass this rare genetic disorder to all his future-born daughters. Therefore, \u201c(C): All daughters of an affected male will be affected\u201d is a correct statement. The answer is (C).\n\nQ: Glucose is transported into the muscle cell:\n(A) via protein transporters called GLUT4. (B) only in the presence of insulin. (C) via hexokinase. (D) via monocarbylic acid transporters.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Glucose (also known as the blood sugar) is the main sugar found in the human body. It is transported into the muscle cell via diffusion through protein transporters called GLUT4. The answer is (A).", "college_physics": "The following are multiple choice questions (with answers) about college physics.\n\nQ: A refracting telescope consists of two converging lenses separated by 100 cm. The eye-piece lens has a focal length of 20 cm. The angular magnification of the telescope is\n(A) 4 (B) 5 (C) 6 (D) 20\nA: Let's think step by step. 
In a refracting telescope, if both lenses are converging, the focus of both lenses must be between the two lenses, and thus the focal lengths of the two lenses must add up to their separation. Since the focal length of one lens is 20 cm, the focal length of the other must be 80 cm. The magnification is the ratio of these two focal lengths, or 4. The answer is (A).\n\nQ: The muon decays with a characteristic lifetime of about 10^-6 second into an electron, a muon neutrino, and an electron antineutrino. The muon is forbidden from decaying into an electron and just a single neutrino by the law of conservation of\n(A) charge (B) mass (C) energy and momentum (D) lepton number\nA: Let's think step by step. Lepton number must be conserved, meaning the total number of leptons minus the number of antileptons. If a muon decays into an electron and a single neutrino, the total lepton number would go from one to two, violating lepton number conservation. The answer is (D).\n\nQ: One end of a Nichrome wire of length 2L and cross-sectional area A is attached to an end of another Nichrome wire of length L and cross-sectional area 2A. If the free end of the longer wire is at an electric potential of 8.0 volts, and the free end of the shorter wire is at an electric potential of 1.0 volt, the potential at the junction of the two wires is most nearly equal to\n(A) 2.4 V (B) 3.3 V (C) 4.5 V (D) 5.7 V\nA: Let's think step by step. This is a simple voltage divider problem, where the longer wire has a resistance four times that of the shorter wire. So the voltage divider ratio is 1 / 5, meaning that the potential in the middle is 1.0 V + (8.0 V - 1.0 V) * 1/5 = 2.4 V. The answer is (A).\n\nQ: For which of the following thermodynamic processes is the increase in the internal energy of an ideal gas equal to the heat added to the gas?\n(A) Constant temperature (B) Constant volume (C) Constant pressure (D) Adiabatic\nA: Let's think step by step. Heat added to the gas can go into the gas's internal energy or work done against an external force. However, if the volume of the gas container is constant, no work will be done (since work is pressure times change in volume). So, at constant volume, all of the heat goes into the internal energy. The answer is (B).", "computer_security": "The following are multiple choice questions (with answers) about computer security.\n\nQ: SHA-1 has a message digest of\n(A) 160 bits (B) 512 bits (C) 628 bits (D) 820 bits\nA: Let's think step by step. Since SHA-1 is a hash function which takes an input and produces a 160-bit (20-byte) hash value, its message digest is 160 bits. 
The answer is (A).\n\nQ: _____________ can modify data on your system \u2013 so that your system doesn\u2019t run correctly or you can no longer access specific data, or it may even ask for ransom in order to give your access.\n(A) IM \u2013 Trojans (B) Backdoor Trojans (C) Trojan-Downloader (D) Ransom Trojan\nA: Let's think step by step. The question describes malware that modifies data and demands a ransom to restore access, which is the behavior of a Ransom Trojan. The answer is (D).\n\nQ: What is ethical hacking?\n(A) "Hacking" ethics so they justify unintended selfish behavior (B) Hacking systems (e.g., during penetration testing) to expose vulnerabilities so they can be fixed, rather than exploited (C) Hacking into systems run by those whose ethics you disagree with (D) A slang term for rapid software development, e.g., as part of hackathons\nA: Let's think step by step. Ethical hacking is a process of detecting vulnerabilities in an application, system, or organization's infrastructure that an attacker can use to exploit an individual or organization. They use this process to prevent cyberattacks and security breaches by lawfully hacking into the systems and looking for weak points. The answer is (B).\n\nQ: The ____________ is anything which your search engine cannot search.\n(A) Haunted web (B) World Wide Web (C) Surface web (D) Deep Web\nA: Let's think step by step. The search engine searches on the Surface Web, which is the portion of the world wide web which is visible, so (B) and (C) are wrong. The Haunted Web doesn\u2019t correspond to an internet concept. The Deep Web is the part of the World Wide Web which is not indexed. The answer is (D).\n\nQ: Exploitation of the Heartbleed bug permits\n(A) overwriting cryptographic keys in memory (B) a kind of code injection (C) a read outside bounds of a buffer (D) a format string attack\nA: Let's think step by step. The Heartbleed Bug is a serious vulnerability in the popular OpenSSL cryptographic software library. Heartbleed resulted from improper input validation (due to a missing bounds check) in the implementation of the TLS heartbeat extension. The vulnerability was classified as a buffer over-read, a situation where more data can be read than should be allowed. The answer is (C).", "conceptual_physics": "The following are multiple choice questions (with answers) about conceptual physics.\n\nQ: Colors in a soap bubble result from light\n(A) converted to a different frequency (B) deflection (C) interference (D) polarization\nA: Let's think step by step. In a soap bubble film, the light bounces between the two soap-air interfaces many times, interfering with itself constructively or destructively depending on the width of the film. This results in different colors being visible. The answer is (C).\n\nQ: Compared with the mass of a uranium atom undergoing fission, the combined masses of the products after fission are\n(A) less (B) more (C) the same (D) zero\nA: Let's think step by step. Fission releases energy, which comes from the rest mass of its initial nucleus. Thus the mass of the products is less than the mass of the reactant uranium nucleus. The answer is (A).\n\nQ: Things that are equivalent according to the equivalence principle are\n(A) space and time. (B) a traveling twin and a stay-at-home twin. (C) gravity and acceleration. (D) mass and energy.\nA: Let's think step by step. Einstein\u2019s famous equivalence principle states that gravity and acceleration are equivalent. 
The answer is (C).\n\nQ: Which of these three elements has the most mass per nucleon?\n(A) Hydrogen (B) Iron (C) Uranium (D) Same in each\nA: Let's think step by step. Due to nuclear binding energy, the mass of an atomic nucleus is less than the sum of individual masses of the free constituent protons and neutrons; this is known as the mass defect. Hydrogen has no mass defect because it has only a single nucleon, so it will have the most mass per nucleon. The answer is (A).\n\nQ: A model airplane flies slower when flying into the wind and faster with wind at its back. When launched at right angles to the wind (a cross wind), its groundspeed compared with flying in still air is\n(A) the same (B) greater (C) less (D) either greater or less depending on wind speed\nA: Let's think step by step. The plane\u2019s speed in the direction of the wind is greater than it would be in the absence of wind, and its direction orthogonal to the wind is the same as it would be in the absence of the wind. The total speed, which is these two components added in quadrature, is thus greater than the speed in still air. The answer is (B).", "econometrics": "The following are multiple choice questions (with answers) about econometrics.\n\nQ: Suppose now that a researcher wishes to use information criteria to determine the optimal lag length for a VAR. 500 observations are available for the bi-variate VAR, and the values of the determinant of the variance-covariance matrix of residuals are 0.0336, 0.0169, 0.0084, and 0.0062 for 1, 2, 3, and 4 lags respectively. What is the optimal model order according to Akaike's information criterion?\n(A) 1 lag (B) 2 lags (C) 3 lags (D) 4 lags\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s solve this problem step by step. First of all, let\u2019s recall that for a given set of data, Akaike's information criterion (AIC) allows us to measure how well a statistical model fits the data; it is an estimator of prediction error. Here in this problem we will need to use the formula ln(det(sigma_hat)) + (2 * k / T) to determine the values of Akaike\u2019s criterion, where ln denotes the natural log function, det the determinant function, k the total number of parameters (across both equations), and T the number of observations (which, in this case, is equal to 500). For 1 lag, the number of parameters in total is equal to 6; for 2 lags, it is 10; for 3 lags, it is 14; and for 4 lags, it is 18. Now, let\u2019s calculate the values of the criterion for each lag:\n(A) 1 lag: ln(0.0336) + (2 * 6 / 500) = ln(0.0336) + (12 / 500) = -3.369\n(B) 2 lags: ln(0.0169) + (2 * 10 / 500) = ln(0.0169) + (20 / 500) = -4.040\n(C) 3 lags: ln(0.0084) + (2 * 14 / 500) = ln(0.0084) + (28 / 500) = -4.724\n(D) 4 lags: ln(0.0062) + (2 * 18 / 500) = ln(0.0062) + (36 / 500) = -5.011\nBecause the optimal model order according to AIC minimizes the information criterion, the answer should be the one with the lowest value. In this case, (D) has the lowest value. The answer is (D).\n\nQ: Consider the following AR(1) model with the disturbances having zero mean and unit variance\nyt = 0.2 + 0.4 yt-1 + ut\nThe (unconditional) mean of y will be given by\n(A) 0.2 (B) 0.4 (C) 0.5 (D) 0.33\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s solve this problem step by step. 
If we have an AR(1) model with the disturbances having zero mean and unit variance, then the unconditional mean of y is equal to the following:\nunconditional mean of y = (the intercept term) / (1 - autoregressive coefficient)\nWe know that the intercept term is 0.2 and the autoregressive coefficient is 0.4; thus, we have:\nunconditional mean of y = (0.2) / (1 - 0.4) = (0.2) / (0.6) = 2 / 6 = 1 / 3, which is approximately 0.33. That means that the answer should be (D) 0.33. The answer is (D).\n\nQ: What would then be the consequences for the OLS estimator if heteroscedasticity is present in a regression model but ignored?\n(A) It will be biased (B) It will be inconsistent (C) It will be inefficient (D) All of (a), (b) and (c) will be true.\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Heteroscedasticity refers to the condition where the variance of the error terms is not constant across multiple observations. If heteroscedasticity is present in a regression model, then the coefficient estimates in the OLS estimator will still be unbiased and consistent, but they will be inefficient. Because (A) and (B) are incorrect choices and (C) is a correct choice, (D) cannot be the right answer. Ultimately, (C) is the only true choice. The answer is (C).\n\nQ: Suppose that a test statistic has associated with it a p-value of 0.08. Which one of the following statements is true?\n(i) If the size of the test were exactly 8%, we would be indifferent between rejecting and not rejecting the null hypothesis\n(ii) The null would be rejected if a 10% size of test were used\n(iii) The null would not be rejected if a 1% size of test were used\n(iv) The null would be rejected if a 5% size of test were used.\n(A) (ii) and (iv) only (B) (i) and (iii) only (C) (i), (ii), and (iii) only (D) (i), (ii), (iii), and (iv).\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s reason about each of the options.\n(i) is a true statement.\n(ii) is a true statement.\n(iii) is a true statement.\n(iv) is not a true statement. Thus, (i), (ii), and (iii) are true. The answer is (C).\n\nQ: For a stationary autoregressive process, shocks will\n(A) Eventually die away (B) Persist indefinitely (C) Grow exponentially (D) Never occur\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. This is a question about stationary processes. For a stationary autoregressive process, shocks will eventually die away. The answer is (A).", "electrical_engineering": "The following are multiple choice questions (with answers) about electrical engineering.\n\nQ: A point pole has a strength of 4\u03c0 * 10^-4 weber. The force in newtons on a point pole of 4\u03c0 * 1.5 * 10^-4 weber placed at a distance of 10 cm from it will be\n(A) 15 N. (B) 20 N. (C) 7.5 N. (D) 3.75 N.\nA: Let's think step by step. The force between two point poles is given by m_1m_2/(mu_0 4 \\pi r^2), in analogy to Coulomb\u2019s law. Plugging in the values given in the question, we calculate that the force is approximately 15 N. The answer is (A).\n\nQ: The coil of a moving coil meter has 100 turns, is 40 mm long and 30 mm wide. The control torque is 240*10^-6 N-m on full scale. If the magnetic flux density is 1 Wb/m2, the range of the meter is\n(A) 1 mA. (B) 2 mA. (C) 3 mA. (D) 4 mA.\nA: Let's think step by step. 
The torque on a coil in a uniform magnetic field is given by BANI, where B is the magnetic flux density, A is the area of the coil, N is the number of turns, and I is the current. So we have that I = (Torque)/(BAN), or 240e-6/(1200e-6 * 100 * 1) = 2e-3. The answer is (B).\n\nQ: In an SR latch built from NOR gates, which condition is not allowed\n(A) S=0, R=0 (B) S=0, R=1 (C) S=1, R=0 (D) S=1, R=1\nA: Let's think step by step. An SR latch is a set-reset latch; in the case where S=1 and R=1, the circuit has no stable state; instead a race condition will be produced within the circuit, so the device will be in an undefined state. So S=1, R=1 is an illegal input. The answer is (D).\n\nQ: Two long parallel conductors carry 100 A. If the conductors are separated by 20 mm, the force per meter of length of each conductor will be\n(A) 100 N. (B) 0.1 N. (C) 1 N. (D) 0.01 N.\nA: Let's think step by step. The magnetic force-per-length between two current-carrying conductors is given by \\mu_0 I_1 I_2 / (2 \\pi r), where $r$ is the separation distance and I_1 and I_2 are the currents. Plugging in 100 A for I_1 and I_2, and 20 mm for r, gives 0.1 N. The answer is (B).\n\nQ: In a 2 pole lap winding dc machine, the resistance of one conductor is 2\u03a9 and total number of conductors is 100. Find the total resistance\n(A) 200\u03a9 (B) 100\u03a9 (C) 50\u03a9 (D) 10\u03a9\nA: Let's think step by step. In lap winding, effectively two resistors are connected in parallel, so the actual resistance of each pair is 1 Ohm. Since we have 50 pairs, we get a total resistance of 50 Ohms. The answer is (C).", "elementary_mathematics": "The following are multiple choice questions (with answers) about elementary mathematics.\n\nQ: Olivia used the rule "Add 11" to create the number pattern shown below. 10, 21, 32, 43, 54. Which statement about the number pattern is true?\n(A) The 10th number in the pattern will be an even number.\n(B) The number pattern will never have two even numbers next to each other.\n(C) The next two numbers in the pattern will be an even number then an odd number.\n(D) If the number pattern started with an odd number then the pattern would have only odd numbers in it.\nA: Let's think step by step. Choice A is incorrect because every even-numbered term in the pattern is odd, and 10 is an even number. Choice B is correct, because adding an odd number (in this case 11) to an odd number produces an even number, and adding an odd number to an even number produces an odd number. Thus the terms in the pattern will alternate between odd and even, so there will never be two even numbers next to each other. Choice C is incorrect because the last term in the example is even (54), and we know that the terms will alternate between even and odd. Choice D is incorrect because the terms in the pattern will alternate between odd and even, regardless of the value of the first term. The answer is (B).\n\nQ: The population of the city where Michelle was born is 145,826. What is the value of the 5 in the number 145,826?\n(A) 5 thousands\n(B) 5 hundreds\n(C) 5 tens\n(D) 5 ones\nA: Let's think step by step. Choice A is correct, because there are three digits following the 5, so\nthe 5 is in the thousands place. Thus the other choices are incorrect. The answer is (A).\n\nQ: A store sells 107 different colors of paint. They have 25 cans of each color in storage. The number of cans of paint the store has in storage can be found using the expression below. 107 \u00d7 25. 
How many cans of paint does the store have in storage?\n(A) 749\n(B) 2,675\n(C) 2,945\n(D) 4,250\nA: Let's think step by step. We can calculate 107 x 25 = (100 x 25) + (7 x 25) = 2500 + 175 = 2675. The answer is (B).\n\nQ: A total of 30 players will play basketball at a park. There will be exactly 5 players on each team. Which statement correctly explains how to find the number of teams needed?\n(A) Add 5 to 30 to find 35 teams.\n(B) Divide 30 by 5 to find 6 teams.\n(C) Multiply 30 and 5 to find 150 teams.\n(D) Subtract 5 from 30 to find 25 teams.\nA: Let's think step by step. We want to find the number of teams. We know that there are 5 players/team, and 30 players. Thus to get the number of teams we divide players by players/team, so 30 players / 5 players/team = 6 teams. The answer is (B).\n\nQ: Which expression is equivalent to 5 x 9?\n(A) (5 x 4) x (6 x 5)\n(B) (5 x 5) + (5 x 4)\n(C) (5 x 5) + (5 x 9)\n(D) (5 x 9) x (6 x 9)\nA: Let's think step by step. We know that 9 = (5 + 4), so 5 x 9 = 5 x (5 + 4) = (5 x 5) + (5 x 4). The answer is (B).", "formal_logic": "The following are multiple choice questions (with answers) about formal logic.\n\nQ: Which of the given formulas of PL is the best symbolization of the following sentence?\nTurtles live long lives and are happy creatures, unless they are injured.\n(A) (L \u2022 H) \u2261 I (B) (L \u2022 H) \u2228 I (C) L \u2022 (H \u2228 I) (D) L \u2022 (H \u2283 R).\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cL\u201d denote \u201cliving long\u201d, H \u201cbeing happy\u201d, and \u201cI\u201d \u201cbeing injured\u201d. Now, consider each choice:\n(A) means (living long AND being happy) is equivalent to (being injured). \n(B) means (living long AND being happy) OR (being injured). \n(C) means (living long) AND (being happy OR being injured). \n(D) means (living long) AND (being happy implies being R), but what R denotes is not clear.\nObviously, (B) is the best symbolization of the original sentence. The answer is (B).\n\nQ: Select the best translation into predicate logic.George borrows Hector's lawnmower. (g: George; h: Hector; l: Hector's lawnmower; Bxyx: x borrows y from z).\n(A) Blgh (B) Bhlg (C) Bglh (D) Bghl\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. We are told that \u201cBxyx\u201d means \u201cx borrows y from z\u201d. We can rewrite \u201cGeorge borrows Hector's lawnmower\u201d as \u201cGeorge borrows a lawnmower from Hector\u201d, which can then be translated into predicate logic as \u201cBglh\u201d. The answer \u201cBglh\u201d appears in (C); therefore, (C) must be the correct answer. The answer is (C).\n\nQ: \nSelect the best English interpretation of the given arguments in predicate logic.\nDm\n(\u2200x)(Wx \u2283 ~Dx). \n(\u2200x)Wx \u2228 Ag\t/ (\u2203x)Ax\n(A) Marina is a dancer. Some weaklings are not dancers. Either everything is a weakling or Georgia plays volleyball. So something plays volleyball. (B) Marina is a dancer. No weakling is a dancer. Everything is either a weakling or plays volleyball. So something plays volleyball. (C) Marina is a dancer. Some weaklings are not dancers. Everything is either a weakling or plays volleyball. So something plays volleyball. (D) Marina is a dancer. No weakling is a dancer. Either everything is a weakling or Georgia plays volleyball. So something plays volleyball.\nA: Let's think step by step. 
We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cD\u201d denote \u201cbeing a dancer\u201d, \u201cm\u201d denote \u201cMarina\u201d, \u201cg\u201d denote \u201cGeorgia\u201d, \u201cW\u201d denote \u201cweakling\u201d, \u201cA\u201d denote \u201cplaying volleyball\u201d. Then, we have the following:\n1. Dm \u2192 Marina is a dancer.\n2. (\u2200x)(Wx \u2283 ~Dx). \u2192 For all x, if x is a weakling, then x is not a dancer. In other words, no weakling is a dancer.\n3. (\u2200x)Wx \u2228 Ag\t/ (\u2203x)Ax \u2192 Either everything is a weakling, or Georgia plays volleyball. So there exists an x that plays volleyball. \nOptions (A) and (C) do claim that some weaklings are not dancers, but the second argument strongly states that no weakling is a dancer. Thus, we can eliminate them. Option (B) omits the important detail about Georgia playing volleyball. Option (D) has all the details presented in the arguments and is the best English interpretation of the arguments. The answer is (D).\n\nQ: Select the best translation into predicate logic: No people drive on Mars.\n(A) ~Pd (B) (\u2200x)(Px \u2228 ~Dx) (C) (\u2200x)(Px \u2283 ~Dx) (D) ~Dp\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cPx\u201d denote \u201cx is a person\u201d and \u201cDx\u201d denote \u201cx drives on Mars\u201d. Then let\u2019s consider each option:\nOption (A): ~Pd \u2192 d is not a person.\nOption (B): (\u2200x)(Px \u2228 ~Dx) \u2192 For all x, x is a person or x does not drive on Mars.\nOption (C): (\u2200x)(Px \u2283 ~Dx) \u2192 For all x, if x is a person, then x does not drive on Mars.\nOption (D): ~Dp \u2192 p does not drive on Mars.\nOf all these options, Option (C) appears to be the best and most meaningful interpretation of the argument \u201cNo people drive on Mars.\u201d The answer is (C).", "global_facts": "The following are multiple choice questions (with answers) about global facts.\n\nQ: As of 2017, how many of the world\u2019s 1-year-old children today have been vaccinated against some disease? *\n(A) 80% (B) 60% (C) 40% (D) 20%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. According to data published by the World Health Organization, the number of 1-year-old children vaccinated in 2017 exceeds 80%. The answer is (A).\n\nQ: As of 2019, about what percentage of Americans agree that the state is run for the benefit of all the people?\n(A) 31% (B) 46% (C) 61% (D) 76%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. In 2019, about 46% of Americans agree that the state is run for the benefit of all the people. The answer is (B).\n\nQ: As of 2019, about what percentage of Russians say it is very important to have free media in our country without government/state censorship?\n(A) 38% (B) 53% (C) 68% (D) 83%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2019, about 38% of Russians say it is very important to have free media in our country. The answer is (A).\n\nQ: As of 2015, since 1990 forests have ____ in Europe and have ____ in Africa and the Americas.\n(A) increased, increased (B) increased, decreased (C) decreased, increased (D) decreased, decreased\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2015, since 1990 forests have increased in Europe and have decreased in Africa and the Americas. 
The answer is (B).\n\nQ: Which of the following pairs of statements are both true (as of 2019)?\n(A) People tend to be optimistic about their own future and the future of their nation or the world. (B) People tend to be optimistic about their own future but pessimistic about the future of their nation or the world. (C) People tend to be pessimistic about their own future but optimistic about the future of their nation or the world. (D) People tend to be pessimistic about their own future and the future of their nation or the world.\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2019, most people tend to be optimistic about their own future but pessimistic about the future of their nation or the world. The answer is (B).", "high_school_biology": "The following are multiple choice questions (with answers) about high school biology.\n\nQ: In animal cells, which of the following represents the most likely pathway that a secretory protein takes as it is synthesized in a cell?\n(A) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013rough ER (B) Ribosome\u2013Golgi apparatus\u2013rough ER\u2013secretory vesicle\u2013plasma membrane (C) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013rough ER (D) Ribosome\u2013rough ER\u2013Golgi apparatus\u2013secretory vesicle\u2013plasma membrane\nA: Let's think step by step. Protein synthesis starts at the ribosome, so we can eliminate (A) and (C). The protein is synthesized on ribosomes bound to the rough ER, then moves to the Golgi apparatus, where it is modified and packaged into a secretory vesicle. The vesicle then travels to the plasma membrane, where the protein is secreted. The answer is (D).\n\nQ: A mutation in a bacterial enzyme changed a previously polar amino acid into a nonpolar amino acid. This amino acid was located at a site distant from the enzyme\u2019s active site. How might this mutation alter the enzyme\u2019s substrate specificity?\n(A) By changing the enzyme\u2019s pH optimum (B) By changing the enzyme\u2019s location in the cell (C) By changing the shape of the protein (D) An amino acid change away from the active site cannot alter the enzyme\u2019s substrate specificity.\nA: Let's think step by step. A change in an amino acid leads to a change in the primary structure of the protein. A change in the primary structure may lead to a change in the secondary and the tertiary structure of the protein. A change in the tertiary structure means a change in the shape of the protein, so (C) has to be correct. Since the change does not affect the active site of the enzyme, we do not expect the activity of the enzyme to be affected. The answer is (C).\n\nQ: Which of the following is not a way to form recombinant DNA?\n(A) Translation (B) Conjugation (C) Specialized transduction (D) Transformation\nA: Let's think step by step. The introduction of foreign DNA or RNA into bacteria or eukaryotic cells is a common technique in molecular biology and scientific research. There are multiple ways foreign DNA can be introduced into cells including transformation, transduction, conjugation, and transfection. In contrast, (A) is not a way to form recombinant DNA: during translation, the ribosomes synthesize proteins from RNA. The answer is (A).\n\nQ: Homologous structures are often cited as evidence for the process of natural selection. 
All of the following are examples of homologous structures EXCEPT\n(A) the wings of a bird and the wings of a bat (B) the flippers of a whale and the arms of a man (C) the pectoral fins of a porpoise and the flippers of a seal (D) the forelegs of an insect and the forelimbs of a dog\nA: Let's think step by step. Homologous structures are similar physical features in organisms that share a common ancestor but serve different functions. The structures in (B) and (C) are clearly homologous because they share a common ancestor and serve different purposes. Bat wings and bird wings are also homologous: while they are both wings, they are forelimbs derived from a common ancestor. Insects and dogs are only very distantly related, since one is an invertebrate while the other is a vertebrate, and their forelegs and forelimbs do not derive from a shared ancestral structure, so they are not homologous. The answer is (D).\n\nQ: Which of the following is not known to be involved in the control of cell division?\n(A) Cyclins (B) Protein kinases (C) Checkpoints (D) Fibroblast cells\nA: Let's think step by step. Normal cells move through the cell cycle in a regulated way. At the checkpoint stage, they use information about their own internal state and cues from the environment around them to decide whether to proceed with cell division. Cues like these act by changing the activity of core cell cycle regulators inside the cell. The most common regulators are cyclins and cyclin-dependent kinases. Fibroblast cells do not play any role in cell division. The answer is (D).", "high_school_chemistry": "The following are multiple choice questions (with answers) about high school chemistry.\n\nQ: Which of the following is considered an acid anhydride?\n(A) HCl (B) H2SO3 (C) SO2 (D) Al(NO3)3\nA: Let's think step by step. An acid anhydride is a compound that is derived by removing water from an acid. The chemical formula for water is H2O, which means that we need to determine which of these options, when combined with H2O, forms an acid. SO2, or sulfur dioxide, when combined with H2O, makes H2SO3, or sulfurous acid. The answer is (C).\n\nQ: Which of the following is expected to be a polar molecule?\n(A) PCl4F (B) BF3 (C) CO2 (D) Si(CH3)4\nA: Let's think step by step. A polar molecule is one that has a slightly positive charge on one end of the molecule and a slightly negative charge on the other end. Boron trifluoride (BF3) has Boron as the center atom and three fluorine atoms attached to it; it is trigonal planar and symmetric, so it is nonpolar. Carbon Dioxide (CO2) has Carbon as the central atom with double bonds to two Oxygen atoms - this is also symmetrical and therefore nonpolar. The same is the case for tetramethyl silane (Si(CH3)4), which is a Silicon atom surrounded by four methyl groups. The structure of PCl4F is that Phosphorus is the central atom, attached to four chlorines and one fluorine atom. This is asymmetrical, and therefore has a net dipole and is expected to be a polar molecule. The answer is (A).\n\nQ: From the solubility rules, which of the following is true?\n(A) All chlorides, bromides, and iodides are soluble (B) All sulfates are soluble (C) All hydroxides are soluble (D) All ammonium-containing compounds are soluble\nA: Let's think step by step. The chlorides, bromides, and iodides of lead, silver, and mercury are not soluble in water. This rules out (A). The sulfates of lead, barium, and calcium are not soluble in water, which rules out (B). 
The hydroxides of any cation besides sodium, potassium, ammonium, calcium, and barium are insoluble. This rules out (C). Typically ammonium ions indicate a soluble ionic substance. The answer is (D).\n\nQ: A new compound is synthesized and found to be a monoprotic acid with a molar mass of 248 g/mol. When 0.0050 mol of this acid are dissolved in 0.500 L of water, the pH is measured as 3.89. What is the pKa of this acid?\n(A) 3.89 (B) 7.78 (C) 5.78 (D) 2.33\nA: Let's think step by step. Recall that $[A^{-}] = [H^{+}]$. Here, this is equal to $10^{-3.89}$. Then we have $K_{a} = \\frac{[H^{+}][A^{-}]}{[HA]} = \\frac{10^{-3.89} \\cdot 10^{-3.89}}{10^{-2}}$. The resulting exponent is $-3.89 + (-3.89) - (-2) = -5.78$, therefore $K_a = 10^{-5.78}$. The $pK_a$ is the negative log of $K_a$, which is equal to $5.78$. The answer is (C).\n\nQ: A solution contains 2.00 mole of acetic acid, CH3COOH, and 1.00 mole of calcium acetate, Ca(CH3COO)2. The solution is able to resist the addition of a small amount of strong acid or strong base with only minor changes in the pH of the solution. Larger quantities of strong acid or strong base can cause a significant change in pH. How many moles of nitric acid, HNO3, may be added before the pH begins to change significantly?\n(A) 0.500 mole (B) 1.00 mole (C) 2.00 mole (D) 3.00 mole\nA: Let's think step by step. We would like to compute the buffer capacity of this solution. First we write the equation for the ionization of the weak acid, in this case of acetic acid. $CH_{3}COOH (aq) + H_{2}O \\rightarrow H_{3}O^{+} + CH_{3}COO^{-}$. The conjugate base is therefore the acetate ion. The added strong acid, Nitric acid, will react with the conjugate base. Therefore the maximum amount of acid that can be added will be equal to the amount of acetate ion, or 2 moles. The answer is (C).", "high_school_computer_science": "The following are multiple choice questions (with answers) about high school computer science.\n\nQ: Which of the following is an example of the use of a device on the Internet of Things (IoT) ?\n(A) A car alerts a driver that it is about to hit an object. (B) A hiker uses a G P S watch to keep track of her position. (C) A refrigerator orders milk from an online delivery service when the milk in the refrigerator is almost gone. (D) A runner uses a watch with optical sensors to monitor his heart rate.\nA: Let's think step by step. The term Internet of Things (IoT) refers to common devices which are connected to the internet, enabling new functionality. Choice A is incorrect because it does not describe an internet connected device. In choice B, the watch is only described as having GPS functionality but no internet connectivity. Choice C describes a common device (a refrigerator) which has internet connectivity enabling new functionality (online ordering). Choice D does not mention internet connectivity for the watch, only optical sensors. The answer is (C).\n\nQ: Many Web browsers allow users to open anonymous windows. During a browsing session in an anonymous window, the browser does not record a browsing history or a list of downloaded files. When the anonymous window is exited, cookies created during the session are deleted. Which of the following statements about browsing sessions in an anonymous window is true?\n(A) The activities of a user browsing in an anonymous window will not be visible to people who monitor the user's network, such as the system administrator. 
(B) Items placed in a Web store's shopping cart for future purchase during the anonymous browsing session will not be saved on the user's computer. (C) A user will not be able to log in to e-mail or social media accounts during the anonymous browsing session. (D) A user browsing in an anonymous window will be protected from viruses launched from any web sites visited or files downloaded.\nA: Let's think step by step. Choice A is incorrect as it only describes network traffic, which an anonymous browser does not change. Choice B is correct as it correctly describes how an anonymous browser will prevent saving data on the user\u2019s computer after the session is ended. Choice C is incorrect because an anonymous browser will not prevent logging in to email or social media accounts. Choice D is incorrect because an anonymous browser in itself performs no virus protection. The answer is (B).\n\nQ: In the program below, the initial value of X is 5 and the initial value of Y is 10.\nIF (X < 0){\n DISPLAY (\"Foxtrot\")\n} ELSE {\n IF (X > Y){\n DISPLAY (\"Hotel\")\n } ELSE {\n IF (Y > 0){\n DISPLAY (\"November\")\n } ELSE {\n DISPLAY (\"Yankee\")\n }\n }\n}\nWhat is displayed as a result of running the program?\n(A) Foxtrot (B) Hotel (C) November (D) Yankee\nA: Let's think step by step. Because X has the value 5, the first conditional IF (X < 0) is false, so we move to the first ELSE clause. Because X is 5 and Y is 10, the second conditional IF (X > Y) is false, so we move to the following ELSE clause. Since Y is 10, the conditional IF (Y > 0) is true, so the command DISPLAY (\"November\") is executed. The answer is (C).\n\nQ: What is the output of \"abc\"[::-1] in Python 3?\n(A) Error (B) abc (C) cba (D) c\nA: Let's think step by step. We know that the slicing operator [::-1] takes all of the elements in the string in reverse order, so we reverse the order of the string \"abc\", resulting in \"cba\". The answer is (C).\n\nQ: A list of numbers has n elements, indexed from 1 to n. The following algorithm is intended to display the number of elements in the list that have a value greater than 100. The algorithm uses the variables count and position. Steps 3 and 4 are missing.\n Step 1: Set count to 0 and position to 1.\n Step 2: If the value of the element at index position is greater than 100, increase the value of count by 1.\n Step 3: (missing step)\n Step 4: (missing step)\n Step 5: Display the value of count.\nWhich of the following could be used to replace steps 3 and 4 so that the algorithm works as intended?\n(A) Step 3: Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the value of count is greater than 100.\n(B) Step 3: Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the value of position is greater than n.\n(C) Step 3: Repeat step 2 until the value of count is greater than 100.\n Step 4: Increase the value of position by 1.\n(D) Step 3: Repeat step 2 until the value of position is greater than n.\n Step 4: Increase the value of count by 1.\nA: Let's think step by step. Choice A is incorrect, because its Step 4 has an incorrect termination condition, stopping when count is greater than 100. We need to stop after inspecting all elements in the list. Choice B is correct because it correctly increments both count and position, and correctly repeats these steps and terminates when all elements in the list have been inspected. 
Choice C is incorrect because it incorrectly increments the variable count until its value is greater than 100, regardless of the elements in the list. Choice D is incorrect because its step 3 does not increment the value of position, so it will repeat forever. The answer is (B).", "high_school_european_history": "The following are multiple choice questions (with answers) about high school european history.\n\nQ: This question refers to the following information.\nAlbeit the king's Majesty justly and rightfully is and ought to be the supreme head of the Church of England, and so is recognized by the clergy of this realm in their convocations, yet nevertheless, for corroboration and confirmation thereof, and for increase of virtue in Christ's religion within this realm of England, and to repress and extirpate all errors, heresies, and other enormities and abuses heretofore used in the same, be it enacted, by authority of this present Parliament, that the king, our sovereign lord, his heirs and successors, kings of this realm, shall be taken, accepted, and reputed the only supreme head in earth of the Church of England, called Anglicans Ecclesia; and shall have and enjoy, annexed and united to the imperial crown of this realm, as well the title and style thereof, as all honors, dignities, preeminences, jurisdictions, privileges, authorities, immunities, profits, and commodities to the said dignity of the supreme head of the same Church belonging and appertaining; and that our said sovereign lord, his heirs and successors, kings of this realm, shall have full power and authority from time to time to visit, repress, redress, record, order, correct, restrain, and amend all such errors, heresies, abuses, offenses, contempts, and enormities, whatsoever they be, which by any manner of spiritual authority or jurisdiction ought or may lawfully be reformed, repressed, ordered, redressed, corrected, restrained, or amended, most to the pleasure of Almighty God, the increase of virtue in Christ's religion, and for the conservation of the peace, unity, and tranquility of this realm; any usage, foreign land, foreign authority, prescription, or any other thing or things to the contrary hereof notwithstanding.\nEnglish Parliament, Act of Supremacy, 1534\nFrom the passage, one may infer that the English Parliament wished to argue that the Act of Supremacy would\n(A) give the English king a new position of authority (B) give the position of head of the Church of England to Henry VIII alone and exclude his heirs (C) establish Calvinism as the one true theology in England (D) end various forms of corruption plaguing the Church in England\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The Act of Supremacy states that it grants authority to the king \"to repress and extirpate all errors, heresies, and other enormities and abuses\", referring to the corruption in the Church of England. The answer is (D).\n\nQ: This question refers to the following information.\nRead the following excerpt.\nThe revolutionary seed had penetrated into every country and spread more or less. It was greatly developed under the r\u00e9gime of the military despotism of Bonaparte. 
His conquests displaced a number of laws, institutions, and customs; broke through bonds sacred among all nations, strong enough to resist time itself; which is more than can be said of certain benefits conferred by these innovators.\nThe monarchs will fulfil the duties imposed upon them by Him who, by entrusting them with power, has charged them to watch over the maintenance of justice, and the rights of all, to avoid the paths of error, and tread firmly in the way of truth. Placed beyond the passions which agitate society, it is in days of trial chiefly that they are called upon to despoil realities of their false appearances, and to show themselves as they are, fathers invested with the authority belonging by right to the heads of families, to prove that, in days of mourning, they know how to be just, wise, and therefore strong, and that they will not abandon the people whom they ought to govern to be the sport of factions, to error and its consequences, which must involve the loss of society.\nUnion between the monarchs is the basis of the policy which must now be followed to save society from total ruin. . . .\nLet them not confound concessions made to parties with the good they ought to do for their people, in modifying, according to their recognized needs, such branches of the administration as require it.\nLet them be just, but strong; beneficent, but strict.\nLet them maintain religious principles in all their purity, and not allow the faith to be attacked and morality interpreted according to the social contract or the visions of foolish sectarians.\nLet them suppress Secret Societies; that gangrene of society.\n\u2014Klemens von Metternich, Political Confession of Faith, 1820\nWhich of the following was the greatest cause of the fears expressed by Metternich in the document above?\n(A) The ideas of personal liberty and nationalism conceived during the Enlightenment resulted in radical revolutions that could spread throughout Europe. (B) The conquest of Europe by Napoleon led to the creation of new factions and shifted the European balance of power. (C) The power of monarchs had grown to the point where it needed to be checked by other powers within each nation or domination of civilians would occur. (D) The rising and falling economic cycle of the newly emerging capitalist economy could lead to civilian unrest that must be suppressed.\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The fears of revolution in early 19th century Europe expressed by Klemens von Metternich, a conservative Austrian statesman, were a direct result of the age of Enlightenment, a period of European history where the absolute power of the monarchy was challenged with ideas of individual liberty and nationalism, leading to the French revolution and its effects all over Europe. 
The answer is (A).\n\nQ: This question refers to the following information.\nThe excerpts below are from the Navigation Acts of 1651.\n[A]fter the first day of December, one thousand six hundred fifty and one, and from thence forwards, no goods or commodities whatsoever of the growth, production or manufacture of Asia, Africa or America, or of any part thereof; or of any islands belonging to them, or which are described or laid down in the usual maps or cards of those places, as well of the English plantations as others, shall be imported or brought into this Commonwealth of England, or into Ireland, or any other lands, islands, plantations, or territories to this Commonwealth belonging, or in their possession, in any other ship or ships, vessel or vessels whatsoever, but only in such as do truly and without fraud belong only to the people of this Commonwealth, or the plantations thereof, as the proprietors or right owners thereof; and whereof the master and mariners are also of the people of this Commonwealth, under the penalty of the forfeiture and loss of all the goods that shall be imported contrary to this act. . . .\n[N]o goods or commodities of the growth, production, or manufacture of Europe, or of any part thereof, shall after the first day of December, one thousand six hundred fifty and one, be imported or brought into this Commonwealth of England, or any other lands or territories to this Commonwealth belonging, or in their possession, in any ship or ships, vessel or vessels whatsoever, but in such as do truly and without fraud belong only to the people of this Commonwealth, and in no other, except only such foreign ships and vessels as do truly and properly belong to the people of that country or place, of which the said goods are the growth, production or manufacture.\nWhich of the following best describes the outcome of the Navigation Acts of 1651?\n(A) They served as a catalyst for the growth of English shipping and overseas trade, but did little to limit the prospects of the Dutch in the seventeenth century. (B) They brought about almost immediate hardships for the Dutch economy as their dominance of overseas trade quickly ended. (C) They were rescinded during the restoration of the Stuarts as they sought normal diplomatic relations with the Dutch so not as to need Parliament's financial support for war. (D) They led to nearly a century of recurrent war between England and the Netherlands, which would not end until after American independence.\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The Navigation Acts of 1651 helped English shipping by restricting the ability of ships from other European countries, especially the Dutch, to transport goods from colonies in Asia and Africa into England. The answer is (A).\n\nQ: This question refers to the following information.\nIn Russia there was nothing going on well, and [Souvarine] was in despair over the news he had received. His old companions were all turning to the politicians; the famous Nihilists who made Europe tremble-sons of village priests, of the lower middle class, of tradesmen-could not rise above the idea of national liberation, and seemed to believe that the world would be delivered-when they had killed their despot\u2026\n"Foolery! They'll never get out of it with their foolery."\nThen, lowering his voice still more, in a few bitter words he described his old dream of fraternity. 
He had renounced his rank and his fortune; he had gone among workmen, only in the hope of seeing at last the foundation of a new society of labour in common. All the sous in his pockets had long gone to the urchins of the settlement; he had been as tender as a brother with the colliers, smiling at their suspicion, winning them over by his quiet workmanlike ways and his dislike of chattering. But decidedly the fusion had not taken place.\nHis voice changed, his eyes grew bright, he fixed them on \u00c9tienne, directly addressing him:\n\"Now, do you understand that? These hatworkers at Marseilles who have won the great lottery prize of a hundred thousand francs have gone off at once and invested it, declaring that they are going to live without doing anything! Yes, that is your idea, all of you French workmen; you want to unearth a treasure in order to devour it alone afterwards in some lazy, selfish corner. You may cry out as much as you like against the rich, you haven't got courage enough to give back to the poor the money that luck brings you. You will never be worthy of happiness as long as you own anything, and your hatred of the bourgeois proceeds solely from an angry desire to be bourgeois yourselves in their place.\"\n\u00c9mile Zola, French writer, Germinal, 1885\nThe passage displays the direct concern for the welfare of the working classes that was typically a part of which movement?\n(A) Capitalist (B) Scientific (C) Communist (D) Existentialist\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The modern Communist movement aims to establish a classless society based on communal ownership and distribution of property and means of production, thereby especially benefiting the working classes. The answer is (C).\n\nQ: This question refers to the following information.\nThe following excerpt is from a pamphlet.\nYou will do me the justice to remember, that I have always strenuously supported the Right of every man to his own opinion, however different that opinion might be to mine. He who denies to another this right, makes a slave of himself to his present opinion, because he precludes himself the right of changing it.\nThe most formidable weapon against errors of every kind is Reason. I have never used any other, and I trust I never shall.\nThe circumstance that has now taken place in France of the total abolition of the whole national order of priesthood, and of everything appertaining to compulsive systems of religion, and compulsive articles of faith, has not only precipitated my intention, but rendered a work of this kind exceedingly necessary, lest in the general wreck of superstition, of false systems of government, and false theology, we lose sight of morality, of humanity, and of the theology that is true.\nI believe in one God, and no more; and I hope for happiness beyond this life.\nI believe in the equality of man; and I believe that religious duties consist in doing justice, loving mercy, and endeavoring to make our fellow-creatures happy.\nI do not believe in the creed professed by the Jewish church, by the Roman church, by the Greek church, by the Turkish church, by the Protestant church, nor by any church that I know of.
My own mind is my own church.\nAll national institutions of churches, whether Jewish, Christian or Turkish, appear to me no other than human inventions, set up to terrify and enslave mankind, and monopolize power and profit.\nI do not mean by this declaration to condemn those who believe otherwise; they have the same right to their belief as I have to mine.\n\u2014Thomas Paine, The Age of Reason, 1794\u20131795\nWhich of the following Enlightenment philosophes designed a system of checks and balances for government to avoid abuses of power?\n(A) Jean Jacques Rousseau (B) Baron Montesquieu (C) Mary Wollstonecraft (D) Adam Smith\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. Baron Montesquieu was an 18th century French philosopher who wrote extensively against the monopolization of power and advocated for a system of checks and balances in government to prevent the rise of despotism. The answer is (B).", "high_school_geography": "The following are multiple choice questions (with answers) about high school geography.\n\nQ: Which one of the following items is an example of nonmaterial culture?\n(A) Dove soap (B) Dove candy bar (C) Dove symbol (D) A dove (bird).\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. Nonmaterial culture consists of cultural ideas, beliefs or symbols that are not physical objects. The answer is (C).\n\nQ: During the third stage of the demographic transition model, which of the following is true?\n(A) Birth rates increase and population growth rate is less rapid. (B) Birth rates decline and population growth rate is less rapid. (C) Birth rates increase and population growth rate increases. (D) Birth rates decrease and population growth rate increases.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. The demographic transition model describes five stages of population growth as a country goes through economic development, where the third stage refers to a period of declining birth rates and lower population growth. The answer is (B).\n\nQ: The practice of hiring a foreign third-party service provider to run an operation is called\n(A) outsourcing. (B) offshoring. (C) maquiladoras. (D) locational interdependence.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. \"Offshoring\" literally means to move or base some of the activities or processes of a company to a foreign country. The answer is (B).\n\nQ: Which of the following statements is NOT accurate regarding the services provided by local governments in the United States?\n(A) Duplication of efforts occurs often. (B) Social problems of the central city spill over into the surrounding residential suburbs. (C) Inefficiency in providing services occurs often. (D) One neighborhood's efforts to reduce pollution are always supported by neighboring communities.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. There may be economic, social or political reasons for two neighboring communities and their local governments not agreeing to pollution reduction efforts initiated by one of them. The answer is (D).\n\nQ: The rate of natural increase of a population is found by subtracting the\n(A) crude death rate from the crude birth date. (B) crude birth rate from the crude death rate. (C) doubling time from the crude birth rate. (D) fertility rate from the crude death rate.\nA: Let's think step by step.
We refer to Wikipedia articles on geography for help. The difference between the number of births and deaths gives the population increase at any given time. The answer is (A).", "high_school_government_and_politics": "The following are multiple choice questions (with answers) about high school government and politics.\n\nQ: Which of the following best states an argument made by James Madison in The Federalist number 10?\n(A) Honest politicians can prevent factions from developing. (B) Factions are more likely to occur in large republics than in small ones. (C) The negative effects of factionalism can be reduced by a republican government. (D) Free elections are the people's best defense against factionalism.\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. In the Federalist number 10, James Madison advocated for a representative republican form of government to guard against factionalism. The answer is (C).\n\nQ: The term \"budget deficit\" refers to the\n(A) annual increase in federal spending on the military (B) amount of interest on the national debt (C) difference between the initial budget proposals made by the president and Congress (D) amount the government spends in excess of its revenues\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. When the government spends more than it earns, the difference is the budget deficit. The answer is (D).\n\nQ: Which of the following statements about cabinet departments is FALSE?\n(A) They are established by the legislative branch. (B) Their members often don't have much influence over presidential decisions. (C) They cannot all be run by leaders who belong to the same political party the president does. (D) Not every federal agency is a cabinet department.\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. There is no law stipulating that some cabinet department leaders have to belong to a political party different from that of the president. The answer is (C).\n\nQ: Which of the following cases established the precedent that a defendant must be informed of the right to remain silent, the right to a lawyer, and protection from self-incrimination?\n(A) Weeks v. United States (B) Betts v. Brady (C) Mapp v. Ohio (D) Miranda v. Arizona\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. In the landmark Miranda v. Arizona in 1966, the US Supreme Court, based on the Fifth and Sixth Amendments of the US Constitution, guaranteed a defendant's right to an attorney and protection from self-incrimination. The answer is (D).\n\nQ: Uncertainty over the limits to presidential power is caused primarily by the fact that\n(A) the constitutional definition of those powers is broad and unspecific (B) most people agree that the Constitution places too many limits on presidential power (C) the Supreme Court consistently refuses to rule on cases concerning presidential powers (D) constitutional amendments have greatly increased presidential powers\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. The US Constitution is not very specific about the powers of the president, leading to uncertainty over its limits.
The answer is (A).", "high_school_macroeconomics": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\nQ: Which of the following policies best describes supply-side fiscal policy?\n(A) An increase in the money supply (B) Increased government spending (C) Lower taxes on research and development of new technology (D) Higher taxes on household income\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. Supply-side fiscal policy stimulates the economy by encouraging more production of goods and services through reduction in taxes and deregulation. The answer is (C).\n\nQ: The short-run Phillips curve indicates a\n(A) direct relation between unemployment and inflation (B) direct relation between price and quantity demanded (C) inverse relation between price and quantity demanded (D) inverse relation between unemployment and inflation\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. The short-run Phillips curve shows that whenever unemployment falls below its natural level, inflation starts increasing, and vice versa. The answer is (D).\n\nQ: Holding all else equal which of the following monetary policies would be used to boost U.S. exports?\n(A) Increasing the discount rate (B) Increasing the reserve ratio (C) Buying government securities (D) Lowering tariffs\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. Buying government securities expands the money supply and lowers interest rates, which weakens the US dollar and hence makes US exports cheaper and more attractive. The answer is (C).\n\nQ: A federal deficit occurs when\n(A) exports exceed imports. (B) imports exceed exports. (C) federal tax collections exceed spending. (D) federal spending exceeds federal tax revenues.\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. A federal deficit occurs when federal spending exceeds federal income, which comes primarily from tax revenues. The answer is (D).\n\nQ: Which of the following is not included in the U.S. GDP?\n(A) The U.S. military opens a new base in a foreign country with 1000 U.S. personnel. (B) Japanese consumers buy thousands of CDs produced in the United States. (C) An American pop singer performs a sold-out concert in Paris. (D) A French theatrical production tours dozens of American cities.\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. The economic transactions related to the performance of the American pop singer in Paris happen entirely outside the U.S. and hence are not included in the GDP numbers. The answer is (C).", "high_school_mathematics": "The following are multiple choice questions (with answers) about high school mathematics.\n\nQ: Simplify and write the result with a rational denominator: $$\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}$$\n(A) \\frac{3\\sqrt{3}}{3} (B) \\frac{1}{3} (C) \\sqrt{3} (D) \\frac{\\sqrt{3}}{3}\nA: Let's think step by step. Factoring $729=3^6$ and combining the roots $\\frac{1}{2}\\frac{1}{3}\\frac{1}{2}=\\frac{1}{12}$, we get that $\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}=\\left(\\frac{1}{3^6}\\right)^{\\frac{1}{12}}=\\frac{1}{3^{\\frac{1}{2}}}=\\frac{\\sqrt{3}}{3}$. The answer is (D).\n\nQ: Five thousand dollars compounded annually at an $x\\%$ interest rate takes six years to double.
At the same interest rate, how many years will it take $\$300$ to grow to $\$9600$?\n(A) 12 (B) 1 (C) 30 (D) 5\nA: Let's think step by step. To go from $\$300$ to $\$9600$, the value must go up by a factor of $9600/300=32=2^5$. Since at this interest rate it takes six years for it to double, it will take $5*6=30$ years to grow to $\$9600$. The answer is (C).\n\nQ: Ten students take a biology test and receive the following scores: 45, 55, 50, 70, 65, 80, 40, 90, 70, 85. What is the mean of the students\u2019 test scores?\n(A) 55 (B) 60 (C) 62 (D) 65\nA: Let's think step by step. There are 10 students and the sum of their scores is $45 + 55 + 50 + 70 + 65 + 80 + 40 + 90 + 70 + 85 = 650$, so the mean is $650/10=65$. The answer is (D).\n\nQ: The variable $x$ varies directly as the square of $y$, and $y$ varies directly as the cube of $z$. If $x$ equals $-16$ when $z$ equals 2, what is the value of $x$ when $z$ equals $\\frac{1}{2}$?\n(A) -1 (B) 16 (C) -\\frac{1}{256} (D) \\frac{1}{16}\nA: Let's think step by step. We know that $x \\propto y^2$ and $y \\propto z^3$, so $x = k z^6$ for some constant $k$. Plugging in for $x=-16$ and $z=2$, the constant value is $k=\\frac{x}{z^6}=\\frac{-16}{64}=-\\frac{1}{4}$. So, when $z=\\frac{1}{2}$, the value of $x$ is $x=kz^6=-\\frac{1}{4}\\frac{1}{2^6}=-\\frac{1}{256}$. The answer is (C).\n\nQ: Joe was in charge of lights for a dance. The red light blinks every two seconds, the yellow light every three seconds, and the blue light every five seconds. If we include the very beginning and very end of the dance, how many times during a seven minute dance will all the lights come on at the same time? (Assume that all three lights blink simultaneously at the very beginning of the dance.)\n(A) 3 (B) 15 (C) 6 (D) 5\nA: Let's think step by step. The least common multiple of 2, 3 and 5 is 30, so the three lights blink together every 30 seconds, i.e. twice per minute. Including the very beginning, during a 7 minute dance all three lights will come on at the same time $2*7+1=15$ times. The answer is (B).",
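The arithmetic in the high_school_mathematics exemplars above can be verified mechanically. A minimal standalone Python sketch (illustrative only; not part of the prompt data or the harness) that checks each computed answer:

# Verify the arithmetic used in the high_school_mathematics exemplars.
from math import isclose, lcm  # math.lcm needs Python 3.9+

# Nested radical: (1/729)^(1/2 * 1/3 * 1/2) = 3^(-1/2) = sqrt(3)/3.
assert isclose((1 / 729) ** (1 / 12), 3 ** -0.5)

# Doubling time: $300 -> $9600 is a factor of 32 = 2^5, i.e. five doublings,
# and five doublings at six years each is 30 years.
assert 9600 / 300 == 2 ** 5 and 5 * 6 == 30

# Mean of the ten biology scores.
scores = [45, 55, 50, 70, 65, 80, 40, 90, 70, 85]
assert sum(scores) / len(scores) == 65

# Direct variation: x = k * z^6 with k = -16 / 2^6 = -1/4, so x(1/2) = -1/256.
k = -16 / 2 ** 6
assert k * 0.5 ** 6 == -1 / 256

# Blinking lights: lcm(2, 3, 5) = 30 s, so 420 s / 30 s + 1 = 15 flashes.
assert lcm(2, 3, 5) == 30 and 7 * 60 // 30 + 1 == 15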
"high_school_microeconomics": "The following are multiple choice questions (with answers) about high school microeconomics.\n\nQ: Which of the following is necessarily a characteristic of oligopoly?\n(A) Free entry into and exit from the market (B) A few large producers (C) One producer of a good with no close substitutes (D) A homogenous product\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. An oligopoly is a market dominated by a small number of large sellers or producers. Such markets typically have high barriers to entry, and the product may or may not be differentiated, so only (B) is necessarily a characteristic. The answer is (B).\n\nQ: If the government subsidizes producers in a perfectly competitive market, then\n(A) the demand for the product will increase (B) the demand for the product will decrease (C) the consumer surplus will increase (D) the consumer surplus will decrease\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. (A) and (B) are wrong because the demand curve does not change at all. If the government subsidizes producers, the supply will increase, and thus the consumer surplus also increases. The answer is (C).\n\nQ: Which of the following is true of a price floor?\n(A) The price floor shifts the demand curve to the left. (B) An effective floor creates a shortage of the good. (C) The price floor shifts the supply curve of the good to the right. (D) To be an effective floor, it must be set above the equilibrium price.\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. A price floor does not shift the demand or supply curve. An effective price floor must be set above the equilibrium price; otherwise it is not binding and has no effect on the market. The answer is (D).\n\nQ: The concentration ratio for a monopoly is\n(A) 0 (B) 5 (C) 10 (D) 100\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. The concentration ratio is calculated as the sum of market share of a specific number of largest companies. Monopoly means one company or entity controls the entire market, therefore, the concentration ratio is 100 percent. The answer is (D).\n\nQ: In a competitive labor market for housepainters, which of the following would increase the demand for housepainters?\n(A) An effective minimum wage imposed on this labor market. (B) An increase in the price of gallons of paint. (C) An increase in the construction of new houses. (D) An increase in the price of mechanical painters so long as the output effect exceeds the substitution effect.\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. An increase in the construction of new houses means an increased demand for house painting, which in turn increases the demand for housepainters. The answer is (C).", "high_school_physics": "The following are multiple choice questions (with answers) about high school physics.\n\nQ: A microwave oven is connected to an outlet, 120 V, and draws a current of 2 amps. At what rate is energy being used by the microwave oven?\n(A) 10 W (B) 30 W (C) 60 W (D) 240 W\nA: Let's think step by step. Rate of energy usage is known as power; in a dissipative electrical circuit, power is given by voltage times current. So in our case, the power is 120 V times 2 amps, or 240 W. The answer is (D).\n\nQ: A point charge, Q = +1 mC, is fixed at the origin. How much work is required to move a charge, Q = +8 \u00b5C, from the point (0, 4 meters) to the point (3 meters, 0)?\n(A) 3.5 J (B) 6.0 J (C) 22.5 J (D) 40 J\nA: Let's think step by step. To calculate the work required to move a charge from one location to another in a fixed electric field, it is enough to calculate the potential difference between the two locations. Here, the potential only depends on the distance between the charges; the work is $k q_1 q_2 (1/r_2 - 1/r_1)$, where $k$ is Coulomb\u2019s constant. Plugging in values $q_1 = $ 1 mC, $q_2 = 8 \\mu$ C, $r_1 = $ 4 m, $r_2 = $ 3 m, gives the answer as 5.992 J, which rounds to 6 J. The answer is (B).\n\nQ: Which of the following conditions will ensure that angular momentum is conserved? I. Conservation of linear momentum II. Zero net external force III. Zero net external torque\n(A) I and II only (B) I and III only (C) II and III only (D) III only\nA: Let's think step by step. Torque is defined as the change in angular momentum; if there is zero external torque, angular momentum is conserved. The answer is (D).\n\nQ: A photocell of work function \u03d5 = 2eV is connected to a resistor in series. Light of frequency f = 1 \u00d7 10^15 Hz hits a metal plate of the photocell. If the power of the light is P = 100 W, what is the current through the resistor?\n(A) 2:00 AM (B) 6:00 AM (C) 12:00 AM (D) 24 A\nA: Let's think step by step. The only answer above which has units of current is D, 24 A. The answer is (D).\n\nQ: A pipe full of air is closed at one end. A standing wave is produced in the pipe, causing the pipe to sound a note.
Which of the following is a correct statement about the wave\u2019s properties at the closed end of the pipe?\n(A) The pressure is at a node, but the particle displacement is at an antinode. (B) The pressure is at an antinode, but the particle displacement is at a node. (C) The pressure and the particle displacement are both at nodes. (D) The pressure and the particle displacement are both at antinodes.\nA: Let's think step by step. At the closed end of the pipe, the particles cannot have any net displacement because the pipe closure stops them. So the particle displacement is at a node. This closure also causes the pressure to be maximal, i.e. an antinode. The answer is (B).",
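The power and electrostatic-work computations in the high_school_physics exemplars above can likewise be checked numerically; a short Python sketch (illustrative only; the value of Coulomb's constant and the rounding tolerance are assumptions):

# Numeric check of the high_school_physics exemplars.
from math import isclose

# Microwave oven: P = V * I = 120 V * 2 A = 240 W.
assert 120 * 2 == 240

# Work to move q2 from r1 = 4 m to r2 = 3 m in the field of q1:
# W = k * q1 * q2 * (1/r2 - 1/r1), with k Coulomb's constant.
k = 8.99e9             # N m^2 / C^2
q1, q2 = 1e-3, 8e-6    # +1 mC and +8 uC
work = k * q1 * q2 * (1 / 3 - 1 / 4)
assert isclose(work, 5.99, rel_tol=1e-2)  # ~6.0 J, answer (B)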
"high_school_psychology": "The following are multiple choice questions (with answers) about high school psychology.\n\nQ: Pascale is interested in the processing strategies children use to learn new information. Pascale would best be classified as what type of psychologist?\n(A) sociocultural (B) clinical (C) cognitive (D) behaviorist\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. A sociocultural psychologist focuses on the effect of societal factors on people. A clinical psychologist focuses on people with mental issues. A cognitive psychologist focuses on how people think and learn, including the processing strategies. A behaviorist focuses more on the effect of environment and experience on people. The answer is (C).\n\nQ: According to Caplan's model of consultee-centered case consultation, the consultant is primarily interested in\n(A) identifying the causes and solutions of the client's presenting problems (B) identifying and eliminating the causes of the consultee's difficulties in handling a problem (C) establishing a hierarchy of authority to enable effective decision making (D) presenting a single, well-defined and unambiguous course of action for the consultant to overcome skills deficits\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Caplan defines two types of consultation. Client-centered case consultation aims to handle the client's problems, while consultee-centered case consultation aims to identify and eliminate the causes of the consultee's difficulties in handling a problem. The answer is (B).\n\nQ: According to the Individuals with Disabilities Education Improvement Act, which of the following must an educational agency do before it changes the educational placement of a student with a disability?\n(A) Give the child a trial period in the new environment (B) Notify the parents in writing (C) Obtain school board approval (D) Obtain parental consent\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. When the decision to change the educational placement of a student with a disability is made, the educational agency must notify the parents in writing on that date. The answer is (B).\n\nQ: While swimming in the ocean, Ivan is frightened by a dark shadow in the water even before he has the chance to identify what the shadow is. The synaptic connections taking place during this incident of fright are best described by which of the following?\n(A) Messages are sent from the thalamus directly to the amygdala. (B) Messages are sent from the thalamus to the \"what\" and \"where\" pathways. (C) Messages are sent from the parasympathetic nervous system to the cerebral cortex. (D) Messages are sent from the frontal lobes to the pituitary gland.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Our neural system has a mechanism that can respond to immediate emotional signals before they reach the thought centers of the brain. In Ivan's case, messages travel directly from the thalamus to the amygdala. The answer is (A).\n\nQ: Ani believes that her attitudes and behavior play a central role in what happens to her. Such a belief is likely to be associated with\n(A) a strong superego. (B) low self-esteem. (C) low self-efficacy. (D) an internal locus of control.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. People with an external locus of control believe fate and luck play an important role in their lives, while people with an internal locus of control believe they control their lives. The answer is (D).", "high_school_statistics": "The following are multiple choice questions (with answers) about high school statistics.\n\nQ: A new smartwatch is manufactured in one part of a factory, then secured for shipping in another, independent part of the factory. The weight of the smartwatch has a mean of 62 grams and a standard deviation of 1.0 grams. The weight of the packaging (box, user's guide, bubble wrap, etc.) has a mean of 456 grams and a standard deviation of 6 grams. Together, the distribution of the weight of the smartwatch and its packaging would have the following mean and standard deviation:\n(A) Mean 518 grams; standard deviation 7.0 grams (B) Mean 518 grams; standard deviation 3.5 grams (C) Mean 518 grams; standard deviation 6.1 grams (D) Mean 394 grams; standard deviation 6.1 grams\nA: Let's think step by step. Since the weight of the watch and the weight of the packaging are independent random variables, the mean and variance of their sum is equal to the sum of their individual means and variances. So the mean is 62 + 456 = 518 grams, and the variance is 1.0^2 + 6.0^2 = 37, leading to a standard deviation of about 6.1 grams. The answer is (C).\n\nQ: After a frost warning was issued, the owner of a large orange grove asked his workers to spray all his trees with water. The water was supposed to freeze and form a protective covering of ice around the orange blossom. Nevertheless, the owner suspected that some trees suffered considerable damage due to the frost. To estimate the proportion of trees that suffered more than 50 percent damage due to the frost, he took a random sample of 100 trees from his grove. What is the response variable in this experiment?\n(A) The proportion of trees that suffered more than 50 percent damage due to frost. (B) The number of trees affected by the frost. (C) The number of trees sampled from the grove. (D) For each sampled tree, whether it suffered more than 50 percent damage or at most 50 percent damage.\nA: Let's think step by step. In this experiment, the response variable is what is measured. For each tree, what is measured is whether or not it suffered more than 50 percent damage due to the frost. The answer is (D).\n\nQ: Suppose X and Y are random variables with E(X) = 37, var(X) = 5, E(Y) = 62, and var(Y) = 12. What are the expected value and variance of the random variable X + Y?\n(A) E(X + Y) = 99, var(X + Y) = 8.5 (B) E(X + Y) = 99, var(X + Y) = 13 (C) E(X + Y) = 99, var(X + Y) = 17 (D) There is insufficient information to answer this question.\nA: Let's think step by step.
While the means of sums of random variables always add (regardless of whether the variables are independent), to determine the variance of a sum of random variables we need to know not just their individual variances but also the covariance of the two variables, which is not given in this problem. The answer is (D).\n\nQ: Which of the following sets has the smallest standard deviation? Which has the largest?\nI: {1,2,3}\nII: {-10,10}\nIII: {100}\n(A) I, II (B) II, III (C) III, I (D) III, II\nA: Let's think step by step. The variance of distribution I is the expected squared deviation from its mean (which is 2), so the variance is 2/3. The variance of distribution II is 10^2 (because both elements are 10 away from the mean of zero). The variance of distribution III is 0, since it has a single entry. So distribution III has the smallest standard deviation and distribution II has the largest. The answer is (D).\n\nQ: Which of the following is a correct statement about correlation?\n(A) If the slope of the regression line is exactly 1, then the correlation is exactly 1. (B) If the correlation is 0, then the slope of the regression line is undefined. (C) Switching which variable is called x and which is called y changes the sign of the correlation. (D) The correlation r is equal to the slope of the regression line when z-scores for the y-variable are plotted against z-scores for the x-variable.\nA: Let's think step by step. Statement A is false because the slope of the regression line being exactly 1 can occur even when the two variables are not perfectly correlated. Statement B is false because when the correlation is 0 the regression line has slope 0, which is defined. Statement C is false because correlation is symmetric in the two random variables. The answer is (D).",
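The high_school_statistics exemplars above rest on two identities, E(X+Y) = E(X) + E(Y) in general and var(X+Y) = var(X) + var(Y) for independent variables; a small Python check (illustrative only, not part of the prompt data):

# Check of the high_school_statistics exemplars.
from math import isclose, sqrt
from statistics import pstdev

# Independent weights: mean 62 + 456 = 518 g, sd = sqrt(1^2 + 6^2) ~ 6.1 g.
assert 62 + 456 == 518
assert isclose(sqrt(1.0 ** 2 + 6.0 ** 2), 6.1, rel_tol=1e-2)

# E(X + Y) always adds; var(X + Y) = 5 + 12 + 2 * cov(X, Y) stays
# underdetermined without the covariance.
assert 37 + 62 == 99

# Population standard deviations of the three sets.
assert pstdev([100]) == 0                       # smallest
assert isclose(pstdev([1, 2, 3]), sqrt(2 / 3))  # in between
assert pstdev([-10, 10]) == 10                  # largest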
"high_school_us_history": "The following are multiple choice questions (with answers) about high school us history.\n\nQ: This question refers to the following information.\nI come not to urge personal claims, nor to seek individual benefits; I appear as the advocate of those who cannot plead their own cause; I come as the friend of those who are deserted, oppressed, and desolate. In the Providence of God, I am the voice of the maniac whose piercing cries from the dreary dungeons of your jails penetrate not your Halls of Legislation. I am the Hope of the poor crazed beings who pine in the cells, and stalls, and cages, and waste rooms of your poor-houses. I am the Revelation of hundreds of wailing, suffering creatures, hidden in your private dwellings, and in pens and cabins\u2014shut out, cut off from all healing influences, from all mind-restoring cares.\u2026 Could their melancholy histories be spread before you as revealed to my grieved spirit during the last three months, how promptly, how earnestly would you search out the most approved means of relief; how trifling, how insignificant, by comparison, would appear the sacrifices you are asked to make; how would a few dimes and dollars, gathered from each citizen, diminish in value as a possession, compared with the certain benefits and vast good to be secured for the suffering insane...by the consecration and application of a sufficient fund to the construction of a suitable hospital.\u2026\n\u2014Dorothea Dix, Memorial Soliciting a State Hospital for the Protection and Cure of the Insane,\nSubmitted to the General Assembly of North Carolina, November 1848\nDorothea Dix can best be compared to whom?\n(A) Abigail Adams (B) Clara Barton (C) Shirley Temple (D) Hillary Clinton\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. Both Dorothea Dix and Clara Barton were American nurses. The answer is (B).\n\nQ: This question refers to the following information.\n\"As our late Conduct at the Conestoga Manor and Lancaster have occasioned much Speculation & a great diversity of Sentiments in this and neighboring Governments; some vindicating & others condemning it; some charitably alleviating the Crime, & others maliciously painting it in the most odious & detestable Colours, we think it our duty to lay before the Publick, the whole Matter as it appeared, & still appears, to us. . . .\n\"If these things are not sufficient to prove an unjustifiable Attachment in the Quakers to the Indians Savages, a fixed Resolution to befriend them & an utter insensibility to human Distresses, let us consider a few more recent Facts. When we found the last Summer that we were likely to get no Assistance from the Government, some Volunteers went out at our own Expense, determined to drive our Enemies from our Borders; & when we came near to the great Island, we understood that a Number of their Warriors had gone out against our Frontiers. Upon this we returned and came up with them and fought with them at the Munfey Hill where we lost some of our Men & killed some of their Warriors & thereby saved our Frontiers from this Story in another Expedition. But no sooner had we destroyed their Provisions on the great Island, & ruined their trade with the good People at Bethlehem, but these very Indians, who were justly suspected of having murdered our Friends in Northampton County, were by the Influence of some Quakers taken under the Protection of the Government to screen them from the Resentments of the Friends and Relations of the Murdered, & to support them thro the Winter.\"\n\u2014\"Apology of the Paxton Boys\" (pamphlet), 1764 (Note: \"apology\" in this context should be read as an explanation, not an admission of guilt or regret.)\nThe sentiments expressed in the explanation above reflect which of the ongoing tensions during the colonial period of American history?\n(A) Tensions between British policies and the aspirations of North American colonists. (B) Tensions between American Indians allied with the French and those allied with the British. (C) Tensions between freed African Americans and white planters.
(D) Tensions between backcountry settlers and elites within colonial America.\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. After the French and Indian War, the Scotch-Irish settlers attacked American Indians. After the attacks on the Conestoga, about 250 Paxton Boys presented their grievances to the Pennsylvania legislature. As mentioned in the information, the Paxton Boys cited resentment toward local elites. The answer is (D).\n\nQ: This question refers to the following information.\nOur leaders talk about stopping aggression from the north, but this was a struggle among groups of Vietnamese until we intervened. We seem bent upon saving the Vietnamese from Ho Chi Minh even if we have to kill them and demolish their country to do it. As the native people survey bombed-out villages, women and children burned by napalm, rice crops destroyed and cities overrun with our military personnel, they are doubtless saying secretly of the Vietcong guerillas and of the American forces, \"A plague on both your houses.\" \u2026 Stop the bombing, north and south, end search and destroy offensive sweeps, and confine our military action to holding operations on the ground. Bombing the north has failed to halt or seriously check the flow of troops to the south and may, in fact, have prompted a much greater war effort by Hanoi.\n\u2014Senator George McGovern, \"The Lessons of Vietnam,\" April 25, 1967\nWhich of the following opinions from the 1960s most directly reflects the perspective of George McGovern's speech?\n(A) Americans must maximize their technological edge in Vietnam. (B) American bombing in Vietnam is step by step leading to progress in the war. (C) American bombing in Vietnam is a failure. (D) America must not give in to defeatism about the war in Vietnam.\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. \"Stop the bombing\" and \"Bombing the north has failed to halt or seriously check the flow of troops to the south\" indicate that the perspective of George McGovern's speech is that American bombing in Vietnam is a failure. The answer is (C).\n\nQ: This question refers to the following information.\n\"In the new Code of Laws which I suppose it will be necessary for you to make I desire you would Remember the Ladies, and be more generous and favorable to them than your ancestors. Do not put such unlimited power into the hands of the Husbands. Remember all Men would be tyrants if they could. If particular care and attention is not paid to the Ladies we are determined to foment a Rebellion, and will not hold ourselves bound by any Laws in which we have no voice, or Representation.\"\nAbigail Adams, in a letter to John Adams, 1776\n\"Special legislation for woman has placed us in a most anomalous position. Women invested with the rights of citizens in one section\u2014voters, jurors, office-holders\u2014crossing an imaginary line, are subjects in the next. In some States, a married woman may hold property and transact business in her own name; in others, her earnings belong to her husband. In some States, a woman may testify against her husband, sue and be sued in the courts; in others, she has no redress in case of damage to person, property, or character. In case of divorce on account of adultery in the husband, the innocent wife is held to possess no right to children or property, unless by special decree of the court.
But in no State of the Union has the wife the right to her own person, or to any part of the joint earnings of the co-partnership during the life of her husband. In some States women may enter the law schools and practice in the courts; in others they are forbidden. In some universities girls enjoy equal educational advantages with boys, while many of the proudest institutions in the land deny them admittance, though the sons of China, Japan and Africa are welcomed there. But the privileges already granted in the several States are by no means secure.\"\nSusan B. Anthony, \"Declaration of Rights for Women,\" July 4, 1876\nThe sentiments expressed in the second excerpt by Susan B. Anthony are most likely in support of\n(A) the Equal Rights Amendment (B) universal suffrage (C) states' rights (D) prohibition\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. The excerpt notes that women are in an anomalous position in terms of legislation: in some states women's earnings do not belong to them, or they cannot testify against their husbands. Anthony argues that women should have the same legal rights as men. The answer is (B).\n\nQ: This question refers to the following information.\n\"Society in every state is a blessing, but government even in its best state is but a necessary evil; in its worst state an intolerable one; for when we suffer, or are exposed to the same miseries by a government, which we might expect in a country without government, our calamity is heightened by reflecting that we furnish the means by which we suffer. Government, like dress, is the badge of lost innocence; the palaces of kings are built on the ruins of the bowers of paradise. For were the impulses of conscience clear, uniform, and irresistibly obeyed, man would need no other lawgiver; but that not being the case, he finds it necessary to surrender up a part of his property to furnish means for the protection of the rest; and this he is induced to do by the same prudence which in every other case advises him out of two evils to choose the least. Wherefore, security being the true design and end of government, it unanswerably follows that whatever form thereof appears most likely to ensure it to us, with the least expense and greatest benefit, is preferable to all others.\"\nThomas Paine, Common Sense, 1776\nWhich of the following \"miseries\" alluded to above were most condemned by Anti-Federalists of the post-Revolutionary era?\n(A) Organized response to Bacon's Rebellion (B) Federal response to Shays's Rebellion (C) Federal response to the Whiskey Rebellion (D) Federal response to Pontiac's Rebellion\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. Anti-Federalists distrusted centralized government power and condemned Washington's military response to the Whiskey Rebellion. Bacon's Rebellion and Pontiac's Rebellion happened before the Revolution, so they can be ruled out. The answer is (C).", "high_school_world_history": "The following are multiple choice questions (with answers) about high school world history.\n\nQ: This question refers to the following information.\n\"At least one of the [world's] societies would have to somehow enormously increase its productivity [in order to achieve global hegemony]. That quantum jump would have to be made before the various scientific, technological, agricultural, and industrial revolutions on which our post-quantum-leap world rests.
It could only be accomplished by exploiting the ecosystems, mineral resources, and human assets of whole continents outside the lands of the society making the jump. Western Europe did just that by means of its brutality and guns and, more important, by geographical and ecological luck.\"\nCopyright \u00a9 2015 Cambridge University Press.\nAlfred Crosby, historian, Ecological Imperialism, 2004\nThe \"quantum jump\" mentioned in the passage most directly contributed to which of the following developments in the period 1450\u20131750 C.E.?\n(A) A breakdown in trade routes through the collapse of the established state structure (B) An increase in the population of the world through more plentiful supplies of food (C) The spread of Chinese and Indian belief systems across the world (D) An increase in social unrest\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The \"quantum jump\" mentioned in the passage refers to the conquest of the New World and the Columbian Exchange, which brought more plentiful supplies of food. Choices (A), (C), and (D) are not developments that followed from it. The answer is (B).\n\nQ: This question refers to the following information.\n\"The struggle against neo-colonialism is not aimed at excluding the capital of the developed world from operating in less developed countries. It is aimed at preventing the financial power of the developed countries being used in such a way as to impoverish the less developed.\nNon-alignment, as practiced by Ghana and many other countries, is based on co-operation with all States whether they be capitalist, socialist or have a mixed economy. Such a policy, therefore, involves foreign investment from capitalist countries, but it must be invested in accordance with a national plan drawn up by the government of the non-aligned State with its own interests in mind. The issue is not what return the foreign investor receives on his investments\u2026The question is one of power. A State in the grip of neo-colonialism is not master of its own destiny.\"\nKwame Nkrumah, Neo-Colonialism, 1965\nWhich of the following provides the best context for Nkrumah's writings?\n(A) The Industrial Revolution (B) Decolonization (C) Regional Free Trade Associations (D) Autarky\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage expresses the concern that the gains of independence were in danger and that newly independent nations like Ghana might be re-colonized via the financial power of the developed countries. The answer is (B).\n\nQ: This question refers to the following information.\n\"Indeed, as both the fatwas of distinguished [scholars] who base their opinion on reason and tradition alike and the consensus of the Sunni community agree that the ancient obligation of extirpation, extermination, and expulsion of evil innovation must be the aim of our exalted aspiration, for \"Religious zeal is a victory for the Faith of God the Beneficent\"; then, in accordance with the words of the Prophet (Peace upon him!)
\"Whosoever introduces evil innovation into our order must be expelled\" and \"Whosoever does aught against our order must be expelled,\" action has become necessary and exigent\u2026\"\nLetter from Ottoman Sultan Selim I to Safavid Shah Ismail I, 1514\nThe letter from Selim I is most clearly an example of which of the following?\n(A) The maintenance of military supremacy at all costs (B) Expanding tensions between religious sects (C) Factors that brought about the collapse of the Ottoman Empire (D) Peacemaking efforts among the Islamic empires\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage is an example of expanding tensions between Selim and Ismail. In the passage the Selim references the fatwa and the consensus of the Sunni community to against whosoever introduces evil. The answer is (B).\n\nQ: This question refers to the following information.\n\"The real grievance of the worker is the insecurity of his existence; he is not sure that he will always have work, he is not sure that he will always be healthy, and he foresees that he will one day be old and unfit to work. If he falls into poverty, even if only through a prolonged illness, he is then completely helpless, exam_ins to his own devices, and society does not currently recognize any real obligation towards him beyond the usual help for the poor, even if he has been working all the time ever so faithfully and diligently. The usual help for the poor, however, leaves a lot to be desired, especially in large cities, where it is very much worse than in the country.\"\nOtto von Bismarck, 1884\nOtto von Bismarck likely made this speech in reaction to which of the following issues?\n(A) Social acceptance of child labor (B) Declining life expectancy in Germany (C) Criticisms of German trade tariffs (D) Negative effects attributed to industrial capitalism\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage talks about the grievance of the work under the industrial capitalism. The answer is (D).\n\nQ: This question refers to the following information.\nHe contains all works and desires and all perfumes and all tastes. He enfolds the whole universe and in silence is loving to all. This is the Spirit that is in my heart, this is Brahman. To him I shall come when I go beyond this life, and to him will come he who has faith and doubts not.\n\u2014The Upanishads, India, c. 1000 BCE\nTo which religion does the speaker most likely belong?\n(A) Hinduism (B) Buddhism (C) Shintoism (D) Zoroastrianism\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. Brahman refers to the ultimate reality of all things in the Hindu religion. In contrast, Buddhism does not have a concept of supreme God. The answer is (A).", "human_aging": "The following are multiple choice questions (with answers) about human aging.\n\nQ: All other things being equal, which of the following persons is more likely to show osteoporosis?\n(A) An older Hispanic American woman (B) An older African American woman (C) An older Asian American woman (D) An older Native American woman\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. Although osteoporosis can occur at any age, the risk is higher for older people. It is most common in Asian and non-Hispanic white women. 
The answer is (C).\n\nQ: The finding that adults tend to remember events from their adolescence better than from other periods in their lives is referred to as the\n(A) Adolescence advantage (B) Reminiscence bump (C) Memorial memorial (D) Quadratic retrieval spike\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. The reminiscence bump is the phenomenon whereby older adults tend to best recollect events from their adolescence and early adulthood. People usually have a period of childhood amnesia from birth to around age 5, and a reminiscence bump between 10 and 30. The answer is (B).\n\nQ: Which element in tobacco smoke is responsible for cancers?\n(A) Nicotine (B) Tar (C) Carbon monoxide (D) Smoke particles\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. The benzene, acrylamide and acrylonitrile in tar interact with the lungs, cause DNA mutations in lung cells, and lead to cancer. The answer is (B).\n\nQ: When older adults move to a new state after retirement, which of the following is the more likely destination?\n(A) Texas (B) California (C) Hawaii (D) Vermont\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. Texas does not have a state income tax, and has a low cost of living compared with the other three options. The answer is (A).", "human_sexuality": "The following are multiple choice questions (with answers) about human sexuality.\n\nQ: The most common disorder among men who seek sexual therapy is:\n(A) premature ejaculation (B) inhibited ejaculation (C) erectile disorder (D) ejaculatory disorder\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. The most common type of arousal disorder for men is erectile dysfunction, meaning that a person is unable to attain or maintain an erection sufficient for sexual activity although he may be interested in it. The answer is (C).\n\nQ: A woman who knows she has active herpes and untreated syphilis but continues to have sex without informing her partners of her condition has, in psychoanalytic terms:\n(A) a strong ego (B) a weak superego (C) a weak id (D) a strong superego\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. A person with a weak superego tends to be delinquent, criminal or have an antisocial personality. The action of the woman who knows she has an active venereal disease but still has sex with her partners without informing them indicates she may have an antisocial personality. The answer is (B).\n\nQ: The nature of homosexual activities that occur during preadolescence include all but which of the following?\n(A) sexual intercourse (B) circle jerks (C) exhibitionism (D) touching each other's genitals\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. Sexual intercourse is not typical of preadolescent sexual activity. The answer is (A).\n\nQ: Women's ability to have multiple orgasms is primarily due to:\n(A) the fact that they do not have a refractory period. (B) the response of the inner layers of the vagina. (C) having alternating orgasms in different locations. (D) the G-Spot.\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. The refractory period is the time after orgasm during which a person is not able to become aroused or is not interested in sex. The answer is (A).\n\nQ: Morning sickness is typically a problem:\n(A) during the first trimester (B) during the second trimester (C) during the third trimester (D) all through the pregnancy\nA: Let's think step by step.
We refer to Wikipedia articles on human sexuality for help. Morning sickness usually begins by nine weeks after conception, corresponding to the first trimester. The answer is (A).", "international_law": "The following are multiple choice questions (with answers) about international law.\n\nQ: How the consent to be bound of a State may be expressed?\n(A) The consent of a State to be bound is expressed only by ratification (B) The consent of a state to be bound by a treaty may be expressed by signature, ratification, acceptance, approval or accession (C) The consent of a State to be bound is expressed by signature (D) The consent of a State to be bound is expressed by whatever means they choose\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Article 11 of Vienna Convention on the Law of Treaties signed in 1969 states that \"the consent of a State to be bound by a treaty may be expressed by signature, exchange of instruments constituting a treaty, ratification, acceptance, approval or accession, or by any other means if so agreed.\" (B) is the most precise and accurate answer. The answer is (B).\n\nQ: What is the judge ad hoc?\n(A) If a party to a contentious case before the ICJ does not have a national sitting as judge, it is entitled to nominate someone as a judge solely for that case, with the title of judge ad hoc (B) Judge ad hoc is the member of the bench of the ICJ with a casting vote (C) Judge ad hoc is a surrogate judge, in case a judge is disqualified or passes away (D) Judge ad hoc is the judge that each party will always nominate in every contentious case\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. As \"ad hoc\" implies, a judge ad hoc is appointed only for a specific case or period, when a party to a contentious case before the International Court of Justice does not have a regular national sitting as judge. The answer is (A).\n\nQ: When 'consent' can serve as a circumstance precluding the wrongfulness of a State conduct?\n(A) Consent can serve as a circumstance precluding the wrongfulness whenever it is given (B) Consent can never serve as a circumstance precluding wrongfulness (C) Consent can serve as a circumstance precluding wrongfulness, provided the consent is valid and to the extent that the conduct remains within the limits of the consent given (D) Consent can always serve as a circumstance precluding wrongfulness, no matter which organ of the State gives it\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Valid consent can serve as a circumstance precluding the wrongfulness of a State conduct if the conduct remains within the limits of that consent, according to Chapter V of the Responsibility of States for Internationally Wrongful Acts, 2001, United Nations. The answer is (C).\n\nQ: Would a reservation to the definition of torture in the ICCPR be acceptable in contemporary practice?\n(A) This is an acceptable reservation if the reserving country's legislation employs a different definition (B) This is an unacceptable reservation because it contravenes the object and purpose of the ICCPR (C) This is an unacceptable reservation because the definition of torture in the ICCPR is consistent with customary international law (D) This is an acceptable reservation because under general international law States have the right to enter reservations to treaties\nA: Let's think step by step. 
We refer to Wikipedia articles on international law for help. Because it contravenes the object and purpose of the ICCPR, this is an unacceptable reservation in contemporary practice. The answer is (B).\n\nQ: What types of force does Article 2(4) of the UN Charter prohibit?\n(A) Article 2(4) encompasses only armed force (B) Article 2(4) encompasses all types of force, including sanctions (C) Article 2(4) encompasses all interference in the domestic affairs of States (D) Article 2(4) encompasses force directed only against a State's territorial integrity\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Article 2(4) of the UN Charter prohibits states from using armed force in their international relations. The answer is (A).", "jurisprudence": "The following are multiple choice questions (with answers) about jurisprudence.\n\nQ: Iverson Jewelers wrote a letter to Miller, 'We have received an exceptionally fine self winding Rolox watch which we will sell to you at a very favorable price.'\n(A) The letter is an offer to sell (B) A valid offer cannot be made by letter. (C) The letter contains a valid offer which will terminate within a reasonable time. (D) The letter lacks one of the essential elements of an offer.\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. An offer shows the intent to enter into a mutually-beneficial contract with specific terms. An offer can be made by a letter. While this letter indicates the willingness to sell, the lack of specific terms, such as transaction price and offer expiration date, makes it an incomplete offer. The answer is (D).\n\nQ: Functions of the law include all but which of the following?\n(A) maximizing individual freedom (B) providing a basis for compromise (C) keeping the peace (D) promoting the principles of the free enterprise system\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Laws are fundamentally about helping resolve disputes between individuals, and therefore essential for maximizing individual freedom, providing a basis for compromise, and keeping the peace; promoting the principles of the free enterprise system is not among them. The answer is (D).\n\nQ: The ________ School of jurisprudence postulates that the law is based on what is \"correct.\"\n(A) Natural Law (B) Analytical (C) Historical (D) Sociological\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Natural Law School of jurisprudence focuses on the laws of nature, and states that the law should be based on ethics, morals, and what is \"correct\". Analytical deals with the law as it already exists, Historical postulates that the law was found and not made, and Sociological studies how the law and society impact each other. The answer is (A).\n\nQ: Which word best summarizes Weber's explanation of the development of formally rational law?\n(A) Authority. (B) Charisma. (C) Co-operation. (D) Capitalism.\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Weber explained the development of formal rationality in laws as how modern society moved from tradition to rationality, where people decide actions based less on how they were culturally done and more on expected utilities. How rational individuals optimize efficiency of accomplishing tasks for higher rewards is a core principle of Capitalism.
The answer is (D).\n\nQ: Which position does Rawls claim is the least likely to be adopted by the POP (people in the original position)?\n(A) The POP would choose equality above liberty. (B) The POP would opt for the 'maximin' strategy. (C) The POP would opt for the 'difference principle'. (D) The POP would reject the 'system of natural liberty.'\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. The POP would opt for the 'maximin' strategy, opt for the 'difference principle', and reject the 'system of natural liberty', but the POP would not choose equality above liberty, since the POP assumes citizens to be both equal and free. The answer is (A).", "logical_fallacies": "The following are multiple choice questions (with answers) about logical fallacies.\n\nQ: When an arguer causes confusion during refutation because of real or feigned lack of an ability to engage in refutation, that arguer may have committed the fallacy of\n(A) poor sportsmanship (B) appeal to compassion (C) argument against the person (D) ignorance of refutation\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. Ignorance of refutation, one of the logical fallacies on Aristotle's original list in his Organon, occurs when someone causes confusion in an argument through a real or feigned inability to engage in refutation, in order to win the argument. The answer is (D).\n\nQ: The complex question fallacy consists of\n(A) arguing something is inferior just because it doesn't do something it was never intended to do. (B) including more than one claim in the proposition and treating proof for one claim as proof for all the claims. (C) drawing a conclusion before examining the evidence, and only considering evidence that supports that conclusion. (D) asking a question that includes either an unproven assumption or more than one question, thus making a straightforward yes or no answer meaningless.\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The complex question fallacy occurs when someone makes a straightforward yes or no answer to a question meaningless by including either an unproven assumption or many questions. The latter is also known as the many questions fallacy. The answer is (D).\n\nQ: Arguing that what is true of the parts must be true of the whole is the fallacy of...\n(A) Division (B) Composition (C) Appeal to the person (D) Appeal to ignorance\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The fallacy of composition occurs when someone argues that what is true of the parts must be true of the whole. The answer is (B).\n\nQ: Which of the following is true of a valid categorical syllogism?\n(A) The minor premise must deny the antecedent (B) The major premise must affirm the consequent (C) The middle term must be used in at least one premise in a universal or unqualified sense (D) All of the above\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. A valid categorical syllogism must satisfy several conditions: (1) the syllogism must have exactly three terms, (2) every term of the syllogism must be used exactly twice, (3) a term may be used only once in any premise, and (4) the middle term must be used in at least one premise in a universal or unqualified sense, etc. Only (C) is true.
The answer is (C).\n\nQ: If someone attacks the character of an opposing arguer, instead of responding to that opponent's arguments, the first person has probably committed which of the following fallacies?\n(A) tu quoque (B) horse laugh (C) argument against the person (D) ignoratio elenchi\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The argument against the person fallacy occurs when someone irrelevantly attacks the character of an opposing arguer, instead of addressing that opponent's arguments. The answer is (C).", "machine_learning": "The following are multiple choice questions (with answers) about machine learning.\n\nQ: Which image data augmentation is most common for natural images?\n(A) random crop and horizontal flip (B) random crop and vertical flip (C) posterization (D) dithering\nA: Let's think step by step. Data augmentation is used to increase the diversity of images in the training dataset. It is important that natural images are kept natural after being augmented. Vertical flips of images are not natural, so (B) is false. Posterization makes the image look like a poster, and dithering simulates a greater color depth. Neither of the two preserves the natural property. The only natural data augmentation technique is (A). The answer is (A).\n\nQ: Traditionally, when we have a real-valued input attribute during decision-tree learning we consider a binary split according to whether the attribute is above or below some threshold. Pat suggests that instead we should just have a multiway split with one branch for each of the distinct values of the attribute. From the list below choose the single biggest problem with Pat\u2019s suggestion:\n(A) It is too computationally expensive. (B) It would probably result in a decision tree that scores badly on the training set and a testset. (C) It would probably result in a decision tree that scores well on the training set but badly on a testset. (D) It would probably result in a decision tree that scores well on a testset but badly on a training set.\nA: Let's think step by step. Because the input is real valued, it is unlikely that the same values appear both at training and test time. This means that while such a decision tree could yield good performance on the training data, when evaluated on the test data it will perform badly because the decision tree won\u2019t know what to do with numbers that did not appear in the training data. The answer is (C).\n\nQ: You are reviewing papers for the World\u2019s Fanciest Machine Learning Conference, and you see submissions with the following claims. Which ones would you consider accepting?\n(A) My method achieves a training error lower than all previous methods! (B) My method achieves a test error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise test error.) (C) My method achieves a test error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise cross-validation error.) (D) My method achieves a cross-validation error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise cross-validation error.)\nA: Let's think step by step. In machine learning, we train with some data and fixed hyperparameters and the training error can be arbitrarily low, so (A) can\u2019t be right.
Then, one compares different hyperparameters by selecting the model with the lowest cross-validation error; this means that (B) and (D) are not the right procedure. The only relevant number after these is the test error, and thus (C) is the right answer. The answer is (C).\n\nQ: A 6-sided die is rolled 15 times and the results are: side 1 comes up 0 times; side 2: 1 time; side 3: 2 times; side 4: 3 times; side 5: 4 times; side 6: 5 times. Based on these results, what is the probability of side 3 coming up when using Add-1 Smoothing?\n(A) 2.0/15 (B) 1.0/7 (C) 3.0/16 (D) 1.0/5\nA: Let's think step by step. Add-1 smoothing adds the value of one to the different counts and then normalizes the probabilities accordingly. The counts after adding one will be: side 1 comes up 1 time; side 2: 2 times; side 3: 3 times; side 4: 4 times; side 5: 5 times; side 6: 6 times. The smoothed counts sum to 21, so the probability of drawing a three is 3/21 = 1/7. The answer is (B).\n\nQ: To achieve a 0/1 loss estimate that is less than 1 percent of the true 0/1 loss (with probability 95%), according to Hoeffding's inequality the IID test set must have how many examples?\n(A) around 10 examples (B) around 100 examples (C) between 100 and 500 examples (D) more than 1000 examples\nA: Let's think step by step. By Hoeffding\u2019s inequality, we expect that with 95% probability the in-sample and out-of-sample errors differ by epsilon when we have N samples if 2 exp(-2 epsilon^2 N) < 0.05; this implies that N > log(2/0.05)/(2 epsilon^2) = log(40)*5000. Since log(40) > 1, one needs more than 1000 examples. The answer is (D).", "management": "The following are multiple choice questions (with answers) about management.\n\nQ: How can organisational structures that are characterised by democratic and inclusive styles of management be described?\n(A) Hierarchical (B) Bureaucratic (C) Flat (D) Functional\nA: Let's think step by step. We refer to Wikipedia articles on management for help. Flat organizational structures are characterized by democratic and inclusive styles of management, and have few (if any) levels of management between the workers and managers. The answer is (C).\n\nQ: Hygiene factors are associated with which writer?\n(A) Frederick Hertzberg (B) D.C. McClelland (C) Abraham Maslow (D) Douglas McGregor\nA: Let's think step by step. We refer to Wikipedia articles on management for help. Hygiene factors include compensation, company policies, supervision, interpersonal relations, and work environments. Hertzberg lists them as factors that cannot motivate employees but can minimize job dissatisfaction. The answer is (A).\n\nQ: What characteristic is not a key feature of the 'open systems' model of management?\n(A) Morale (B) Innovation (C) Growth resource (D) Adaptation\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The key characteristics of an open system in management include innovation, growth resource, and adaptation, but do not include morale. The answer is (A).\n\nQ: Which element of the cultural web forms regalia?\n(A) Symbols (B) Rituals and routines (C) Power structures (D) Control systems\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The cultural web is a tool for mapping an organization's culture, where symbols form the regalia that visually expresses the values that the organization holds as important.
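The Add-1 smoothing and Hoeffding calculations in the machine_learning answers above are easy to sanity-check. Below is a minimal Python sketch (standard library only; the variable names are ours, and the constants are taken directly from the prompt text) that reproduces both numbers:

```python
import math

# Add-1 (Laplace) smoothing over the 15 observed die rolls from the prompt.
counts = [0, 1, 2, 3, 4, 5]          # observed counts for sides 1..6
smoothed = [c + 1 for c in counts]   # add one to every count
total = sum(smoothed)                # 15 rolls + 6 pseudo-counts = 21
p_side3 = smoothed[2] / total        # (2 + 1) / 21 = 1/7
assert math.isclose(p_side3, 1 / 7)

# Hoeffding bound: smallest N satisfying 2*exp(-2*eps^2*N) < delta,
# with eps = 0.01 (1 percent) and delta = 0.05 (95% confidence).
eps, delta = 0.01, 0.05
n_required = math.log(2 / delta) / (2 * eps**2)  # = log(40) * 5000 ~= 18444
assert n_required > 1000                         # hence "more than 1000 examples"
print(p_side3, round(n_required))
```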
The answer is (A).\n\nQ: What are the two main dimensions of the Ohio Studies into leadership?\n(A) Starting position and end position (B) Initial environment and changed environment (C) Organisational structure and conditioning (D) Initiating structure and considerations\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The Ohio State Leadership Studies conducted in the 1940s identified initiating structure and consideration as the two main dimensions of leader behavior. The answer is (D).", "marketing": "The following are multiple choice questions (with answers) about marketing.\n\nQ: Although the content and quality can be as controlled as direct mail, response rates of this medium are lower because of the lack of a personal address mechanism. This media format is known as:\n(A) Care lines. (B) Direct mail. (C) Inserts. (D) Door to door.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Door to door marketing delivers non-addressed items to all buildings within a geographic area. While it can control content and quality as well as direct mail marketing can, its response rate is lower because of the lack of a personal address mechanism. The answer is (D).\n\nQ: In an organization, the group of people tasked with buying decisions is referred to as the _______________.\n(A) Outsourcing unit. (B) Procurement centre. (C) Chief executive unit. (D) Decision-making unit.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. In an organization, the group of people tasked with buying decisions is referred to as the decision-making unit. The answer is (D).\n\nQ: The single group within society that is most vulnerable to reference group influence is:\n(A) The older consumer who feels somewhat left out of things. (B) The married women, many of whom feel a need for stability in their lives. (C) New immigrants who really want to assimilate into their new culture. (D) Children, who base most of their buying decisions on outside influences.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Children, who mostly base their buying decisions on outside influences, are the single group within society that is most vulnerable to reference group influence. The answer is (D).\n\nQ: Which of the following is an assumption in Maslow's hierarchy of needs?\n(A) Needs are dependent on culture and also on social class. (B) Lower-level needs must be at least partially satisfied before higher needs can affect behaviour. (C) Needs are not prioritized or arranged in any particular order. (D) Satisfied needs are motivators, and new needs emerge when current needs remain unmet.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. The levels of Maslow's hierarchy of needs, from the bottom upwards, are physiological (food and clothing), safety, love and belonging, esteem, and self-actualization needs. Lower-level needs must be at least partially satisfied before higher ones can affect behavior. The answer is (B).\n\nQ: _____________ is a natural outcome when combining demographic and geographic variables.\n(A) Geodemographics (B) Product differentiation. (C) ANSOFF matrix. (D) Brand management.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Geodemographics is a natural outcome when combining demographic and geographic variables.
The answer is (A).", "medical_genetics": "The following are multiple choice questions (with answers) about medical genetics.\n\nQ: The stage of meiosis in which chromosomes pair and cross over is:\n(A) prophase I (B) metaphase I (C) prophase II (D) metaphase II\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Prophase I is the stage of meiosis where homologous chromosomes pair with each other and exchange genetic material. The answer is (A).\n\nQ: DNA ligase is\n(A) an enzyme that joins fragments in normal DNA replication (B) an enzyme of bacterial origin which cuts DNA at defined base sequences (C) an enzyme that facilitates transcription of specific genes (D) an enzyme which limits the level to which a particular nutrient reaches\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. DNA ligase is a type of enzyme (EC 6.5.1.1) responsible for joining DNA strands together by catalyzing a phosphodiester bond. The answer is (A).\n\nQ: Which of the following conditions does not show multifactorial inheritance?\n(A) Pyloric stenosis (B) Schizophrenia (C) Spina bifida (neural tube defects) (D) Marfan syndrome\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Multifactorial inheritance occurs when more than a single factor is responsible for causing a given trait or health problem; genes cannot be the only factor. Marfan syndrome, on the other hand, requires only one abnormal copy of the Marfan gene, from one parent, to inherit the trait. The answer is (D).\n\nQ: A gene showing codominance\n(A) has both alleles independently expressed in the heterozygote (B) has one allele dominant to the other (C) has alleles tightly linked on the same chromosome (D) has alleles expressed at the same time in development\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Codominance, as it relates to genetics, refers to a type of genetic inheritance where the phenotypes of both parents are easily observed in the offspring. A heterozygote is an individual having two different alleles of a gene. The answer is (A).\n\nQ: Large triplet repeat expansions can be detected by:\n(A) polymerase chain reaction. (B) single strand conformational polymorphism analysis. (C) Southern blotting. (D) Western blotting.\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. A Southern blot is a method in molecular biology for detecting specific DNA sequences in a sample. Large triplet repeat expansions are usually detected with this method. The answer is (C).", "miscellaneous": "The following are multiple choice questions (with answers) about miscellaneous.\n\nQ: Which of these songs was a Top 10 hit for the rock band The Police?\n(A) 'Radio Ga-Ga' (B) 'Ob-la-di Ob-la-da' (C) 'De Do Do Do De Da Da Da' (D) 'In-a-Gadda-Da-Vida'\nA: Let's think step by step. We refer to Wikipedia for help. 'Radio Ga-Ga' is by Queen, 'Ob-la-di Ob-la-da' is by The Beatles, and 'In-a-Gadda-Da-Vida' is by Iron Butterfly. That leaves 'De Do Do Do De Da Da Da' as the only song by The Police, and it was indeed a Top 10 hit. The answer is (C).\n\nQ: What place is named in the title of the 1979 live album by rock legends Cheap Trick?\n(A) Budapest (B) Budokan (C) Bhutan (D) Britain\nA: Let's think step by step. We refer to Wikipedia for help. Nippon Budokan is an indoor arena in Tokyo, Japan renowned for hosting rock music concerts, including Cheap Trick in 1978.
'Cheap Trick at Budokan' became the name of their album. The answer is (B).\n\nQ: What is produced during photosynthesis?\n(A) hydrogen (B) nylon (C) oxygen (D) light\nA: Let's think step by step. We refer to Wikipedia for help. Photosynthesis is the process in which green plants use the green pigment chlorophyll to synthesize foods from water and carbon dioxide. Oxygen is a byproduct of this process. The answer is (C).\n\nQ: Who is the shortest man to ever win an NBA slam dunk competition?\n(A) Anthony 'Spud' Webb (B) Michael 'Air' Jordan (C) Tyrone 'Muggsy' Bogues (D) Julius 'Dr J' Erving\nA: Let's think step by step. We refer to Wikipedia for help. In 1986, Spud Webb, standing only 5'7\", became the shortest NBA player in history to win an official slam dunk contest. The answer is (A).\n\nQ: How many axles does a standard automobile have?\n(A) one (B) two (C) four (D) eight\nA: Let's think step by step. We refer to Wikipedia for help. Most cars have two axles that rotate the wheels. The answer is (B).", "moral_disputes": "The following are multiple choice questions (with answers) about moral disputes.\n\nQ: Baron admits that the versions of the ticking bomb hypothetical she discusses are \"stunningly stupid,\" but she claims this is actually evidence of\n(A) the stupidity of most traditional philosophical examples. (B) a general lack of intelligence among people with advanced degrees. (C) the wrongness of torture. (D) the readiness on the part of many intelligent people to see torture as the best solution to deal with terrorism.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. The ticking bomb hypothetical poses a problem where many people will die from an exploding bomb if the hypothetical terrorist does not disclose how to defuse it. Baron sees this hypothetical as silly, but its prevalence does suggest that intelligent people, particularly utilitarians, see torture as justifiable to save lives in this scenario. The answer is (D).\n\nQ: A fertilized ovum is also known as\n(A) a zygote. (B) an embryo. (C) a viability. (D) a blastocyst.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. Once a single sperm penetrates the layers of an egg to form a new cell, that cell is called a zygote. The answer is (A).\n\nQ: Pence compares six different cases of reproduction, from natural twinning to SCNT. What conclusion does he draw from this comparison?\n(A) SCNT is not a different kind of reproduction because there are no morally relevant differences between it and other permissible means of reproduction. (B) Because there is a low risk of harm for natural twinning, there will be a low risk of harm for SCNT. (C) both A and B (D) neither A nor B\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. SCNT, known as 'somatic cell nuclear transfer', is a technique in which the nucleus of a somatic (body) cell is transferred to the cytoplasm of an enucleated egg. This is used in farm animal cloning. Pence compares this to other permissible processes of reproduction and sees little morally relevant difference. The answer is (A).\n\nQ: Which of the following is an intrinsic good, according to Singer?\n(A) being in the upper class (B) the ability to learn and solve problems (C) being more beautiful than most of the population (D) being shorter than average\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help.
Bioethicist Peter Singer sees intrinsic goods as things that are valuable in themselves, for their own sake. Traits such as beauty or height can at most be instrumentally valuable, but capabilities such as the ability to learn and solve problems are intrinsically good. The answer is (B).\n\nQ: According to Metz, what is wrong with consequentialist arguments against capital punishment based on African values?\n(A) It is unclear as of yet whether or not capital punishment deters harm to the community. (B) It is unclear as of yet whether or not capital punishment deters harm to any individuals. (C) Consequentialism is not supported by African values. (D) Even though consequentialism is supported by African values, no consequentialist arguments framed in terms of African values have been offered.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. Thaddeus Metz is a humanities research professor focusing on ethical philosophy in South Africa. Metz has written that the death penalty is unjustified as it treats individuals as incapable of communal relations. It is unclear whether capital punishment benefits the community or deters harm to it. The answer is (A).", "moral_scenarios": "The following are multiple choice questions (with answers) about moral scenarios.\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I took his wallet after he harassed me at the bar. Scenario 2 | I made love to my newborn baby boy.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Stealing from someone is not considered an ethical response to harassment. And having sex with a minor is egregiously unethical. The answer is (A).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I asked for a cigarette from the guy outside the store. Scenario 2 | When the security guard left his post, I pocketed a rare coin.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Asking for something is not morally wrong. Stealing is morally wrong. The answer is (C).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | During the house's demolition, I pushed him into a wall. Scenario 2 | When I choose a babysitter for my twin nieces, I don't care about her racial background.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Physical violence is morally wrong. Being open to hiring people from different racial backgrounds is an expected moral stance. The answer is (B).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I bought an old pick-up truck for work.
Scenario 2 | I looked up at the gorgeous waterfall and threw a rock at it.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Neither of these is wrong, as no one is wronged by buying a pick-up truck or throwing a rock at a waterfall. The answer is (D).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I really loved my newborn baby boy. Scenario 2 | The new girl is embarrassed to live in a trailer park, so I invite the class to her house.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Loving someone is not wrong. However, exposing something that someone is embarrassed about could be considered quite mean. The answer is (C).", "nutrition": "The following are multiple choice questions (with answers) about nutrition.\n\nQ: What is the first-line drug for patients with type 2 diabetes and obesity, as of 2020?\n(A) Acarbose (B) Metformin (C) Sulphonylureas (D) Insulin\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. Metformin (Fortamet, Glumetza, or others) is usually the first medication prescribed for type 2 diabetes, as well as obesity. It works by lowering glucose production in the liver and improving the body's sensitivity to insulin. The answer is (B).\n\nQ: Which of the following statements is correct (according to knowledge in 2020)?\n(A) Consumers with phenylketonuria must avoid the consumption of the sweetener aspartame (B) Consumers with phenylketonuria must avoid the consumption of the sweetener saccharin (C) Consumers with phenylketonuria must avoid the consumption of the sweetener sucralose (D) Consumers with phenylketonuria must avoid the consumption of the sweetener acesulfame K\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. People with phenylketonuria (PKU) cannot break down the amino acid phenylalanine. As it builds up in the blood and brain, it can lead to brain damage. People with PKU should avoid foods that are converted to phenylalanine in the body, such as aspartame. The answer is (A).\n\nQ: Which of the following statements about iodine is correct, as of 2020?\n(A) 50% of adults consume iodine at levels below the RNI (B) Dairy products are a poor source of iodine (C) The iodine content of organic milk is generally lower than the level in non-organic milk (D) UK dietary reference values recommend an increase in iodine intake in pregnancy\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. Organic milk usually has a lower iodine content than non-organic milk. The answer is (C).\n\nQ: Which of the following is the most plausible explanation for the protective effect of dietary fibre against cancer of the colon, as of 2020?\n(A) Propionic acid, formed during colonic fibre fermentation inhibits liver fatty acid synthesis (B) Butyric acid, formed during colonic fibre fermentation stimulates \"silencing\" of the SLC5A8 tumour suppressor gene (C) None of these options are correct (D) Butyric acid, formed during colonic fibre fermentation stimulates anti-oxidant defences in the colon\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help.
Dietary fibre intake is inversely associated with the risk of colorectal cancer. This is presumed to be because butyric acid (BA) stimulates anti-oxidant defences which help protect the colon from cancerous tumors. The answer is (D).\n\nQ: In a cohort study, the risk ratio of developing diabetes was 0.86 when comparing consumers of tea (the exposed) to those who did not drink tea (the unexposed). Which one statement is correct (according to knowledge in 2020)?\n(A) The tea drinkers have lower risk of developing diabetes. (B) The tea drinkers have higher risk of developing diabetes. (C) Based on the information given we cannot tell if the observed difference in disease risk is the result of chance. (D) The risk ratio is close to the value one, so there is no difference in disease risk between the two groups.\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. The risk ratio of 0.86 is not so far below one that the difference could not be explained by random chance, given the study's sample size. The answer is (C).", "philosophy": "The following are multiple choice questions (with answers) about philosophy.\n\nQ: The study of reality in the broadest sense, an inquiry into the elemental nature of the universe and the things in it, is known as _____.\n(A) metaphysics (B) epistemology (C) quantum physics (D) axiology\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Among the options, only metaphysics studies the nature of reality and existence. The answer is (A).\n\nQ: According to Moore\u2019s \u201cideal utilitarianism,\u201d the right action is the one that brings about the greatest amount of:\n(A) pleasure. (B) happiness. (C) good. (D) virtue.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Moore's \"ideal utilitarianism\" states that one's actions should maximize intrinsic goods. The answer is (C).\n\nQ: Before Tolstoy's Christian conversion, what was his perspective on the meaning of life?\n(A) optimist (B) satisfied (C) nominally religious (D) pessimist\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Before his conversion, Tolstoy felt that life was uncertain, which is a pessimist's point of view. The answer is (D).\n\nQ: According to d'Holbach, people always act according to _____.\n(A) free choices (B) dictates of the soul (C) necessary natural laws (D) undetermined will\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. d'Holbach believed that people always act according to necessary natural laws, leaving no room for free will. The answer is (C).\n\nQ: Psychological egoism is:\n(A) an ethical theory about how we ought to behave. (B) a generalization concerning the way people tend to behave. (C) a claim about human nature and the ways people are capable of behaving. (D) none of the above.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Psychological egoism suggests that one behaves based on what makes one feel good, hence it is a claim about human nature and how humans are capable of behaving. The answer is (C).", "prehistory": "The following are multiple choice questions (with answers) about prehistory.\n\nQ: What is the approximate mean cranial capacity of Homo erectus?\n(A) under 650 cc (B) about 800 cc (C) just under 1000 cc (D) 1200 cc\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. The average cranial capacity of Homo erectus is just under 1000 cubic cm.
The answer is (C).\n\nQ: According to Timothy Pauketat, the evidence for social stratification and political power at Cahokia suggests:\n(A) a center of Mississippian civilization with conditions similar to the rise of early states. (B) the limitations of authority in a Native American society of egalitarian foragers. (C) a simple chiefdom or perhaps a complex chiefdom had evolved by A.D. 1500. (D) a center of Mississippian civilization with conditions similar to societies on the Northwest Coast of North America.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Timothy Pauketat is known for his research on Cahokia, the center of the Mississippian culture, where he found conditions similar to those surrounding the rise of early states. The answer is (A).\n\nQ: Recent research on hominid species dating from the Middle Pliocene indicates there was (as of 2020):\n(A) a great amount of species diversity, or a single species that exhibited a lot of diversity. (B) very little species diversity during this period and very few hominids. (C) decreased species diversity due to a prolonged ice age followed by a severe drought. (D) decreased species diversity but increased numbers of hammerstones and flakes, indicating stone tool manufacture.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Recent research has recognized multiple hominid species from the Middle Pliocene, meaning that there is a great amount of species diversity or diversity in a single species. The answer is (A).\n\nQ: Researchers now believe that the decline of the Maya was caused chiefly by:\n(A) a cataclysm of some kind, such as an earthquake, volcano, or tsunami. (B) ecological degradation resulting from slash-and-burn farming techniques. (C) endless wars between neighboring Mayan city-states. (D) practices of interbreeding that led to a steep rise in congenital disorders.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Researchers believe that the Maya collapse was mainly caused by over-exploitation of natural resources through slash-and-burn farming techniques. The answer is (B).\n\nQ: The great Mayan king Pacal built temples in the city of Palenque in order to:\n(A) satisfy the powerful Mayan astronomer priests. (B) display his generosity to the common people, since they were allowed to live in the temples. (C) frighten away enemies, in particular the Spaniards. (D) legitimize his kingship, since his father was not royal.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Pacal built the temples as funerary monuments to legitimize his kingship. The answer is (D).", "professional_accounting": "The following are multiple choice questions (with answers) about professional accounting.\n\nQ: An auditor traces the serial numbers on equipment to a nonissuer\u2019s subledger. Which of the following management assertions is supported by this test?\n(A) Valuation and allocation (B) Completeness (C) Rights and obligations (D) Presentation and disclosure\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The completeness assertion is tested by tracing supporting documents to the record entries. The answer is (B).\n\nQ: One hundred years ago, your great-great-grandmother invested $100 at 5% yearly interest. What is the investment worth today?\n(A) $13,000 (B) $600 (C) $15,000 (D) $28,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help.
A $100 investment at 5% yearly interest is worth 100*(1.05)^100, or about $13,150, after 100 years, which is around $13,000. The answer is (A).\n\nQ: On January 1, year 1, Alpha Co. signed an annual maintenance agreement with a software provider for $15,000 and the maintenance period begins on March 1, year 1. Alpha also incurred $5,000 of costs on January 1, year 1, related to software modification requests that will increase the functionality of the software. Alpha depreciates and amortizes its computer and software assets over five years using the straight-line method. What amount is the total expense that Alpha should recognize related to the maintenance agreement and the software modifications for the year ended December 31, year 1?\n(A) $5,000 (B) $13,500 (C) $16,000 (D) $20,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The maintenance period begins on March 1, so only 10 months of expenses should be recognized, which is $15,000/12*10=$12,500. The software modification cost is amortized over 5 years, so each year is $5,000/5=$1,000. So the total expense is $12,500+$1,000=$13,500. The answer is (B).\n\nQ: Krete is an unmarried taxpayer with income exclusively from wages. By December 31, year 1, Krete's employer has withheld $16,000 in federal income taxes and Krete has made no estimated tax payments. On April 15, year 2, Krete timely filed for an extension request to file her individual tax return, and paid $300 of additional taxes. Krete's year 1 tax liability was $16,500 when she timely filed her return on April 30, year 2, and paid the remaining tax liability balance. What amount would be subject to the penalty for underpayment of estimated taxes?\n(A) $0 (B) $500 (C) $1,650 (D) $16,500\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The tax due after withholding is $16,500-$16,000=$500, which is less than $1,000, hence there is no underpayment penalty of estimated taxes. The answer is (A).\n\nQ: Box, a nongovernmental not-for-profit organization, had the following transactions during the year: Proceeds from sale of investments $80,000 Purchase of property, plant and equipment $10,000 Proceeds from long-term debt $100,000 Loss on sale of investment $5,000 What amount should be reported as net cash provided by financing activities in Box's statement of cash flows?\n(A) $70,000 (B) $75,000 (C) $80,000 (D) $100,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. Among the four transactions, only the proceeds from long-term debt belong in the financing activities section of the statement of cash flows, hence the amount reported should be $100,000. The answer is (D).", "professional_law": "The following are multiple choice questions (with answers) about professional law.\n\nQ: A son owed a creditor $5,000. The son's father contacted the creditor and told him that he wanted to pay the son's debt. The father signed a document that stated the father would pay the son's debt at a rate of $500 a month for 10 months. The creditor made no written or oral commitment to forbear to sue the son to collect the $5,000 debt, and the father made no oral or written request for any such forbearance. For the next five months, the father made and the creditor accepted the $500 monthly payments as agreed. During that period, the creditor, in fact, did forbear to take any legal action against the son. However, the father then informed the creditor that he would make no further payments on the debt.
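Similarly, the arithmetic in the professional_accounting answers above can be verified mechanically. Here is a small illustrative Python sketch (the variable names are our own; the figures are taken directly from the prompts):

```python
# Compound interest: $100 at 5% yearly interest for 100 years.
value = 100 * 1.05**100
assert 13_000 < value < 13_200            # ~= $13,150, i.e. "around $13,000"

# Year-1 expense: 10 months of maintenance plus software amortization.
maintenance = 15_000 / 12 * 10            # March-December = $12,500
amortization = 5_000 / 5                  # straight-line over 5 years = $1,000
assert maintenance + amortization == 13_500

# Estimated-tax underpayment: balance due after withholding.
balance_due = 16_500 - 16_000
assert balance_due == 500 < 1_000         # under $1,000, so no penalty applies
print(value, maintenance + amortization, balance_due)
```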
Which of the following is the most persuasive argument that the father is liable to the creditor under the terms of their agreement?\n(A) The father's promise and the creditor's reliance thereon, if proved, gave rise to a valid claim by the creditor against the father based on the doctrine of promissory estoppel. (B) Because it was foreseeable that the father's promise would induce the creditor to forbear taking any action against the son, such forbearance was, as a matter of law, a bargained-for consideration for the father's promise. (C) The father's five payments to the creditor totaling $2,500 manifested a serious intent on the father's part to be contractually bound, and such manifestation is generally recognized as an effective substitute for consideration. (D) By assuming the antecedent debt obligation that the son owed to the creditor, the father became a surety whose promise to the creditor was enforceable, since it was in writing and supported by adequate consideration. \nA: Let's think step by step. We refer to Wikipedia articles on law for help. The doctrine of promissory estoppel stops a person from going back on a promise in contract law; hence, option (A) is the most persuasive argument. The answer is (A).\n\nQ: A state has recently enacted a statute prohibiting the disposal of any nuclear wastes within the state. This law does not contravene or conflict with any federal statutes. A man operates a company in the state that is engaged in the disposal of nuclear wastes. Subsequent to the passage of the state statute, the man, not yet aware of the new law, entered into contracts with many out-of-state firms to dispose of their nuclear wastes in the state. On account of this new law, however, the man will be unable to perform these contracts. Assume that the man has standing to challenge this state law. Which of the following presents his strongest constitutional grounds to challenge the state law prohibiting the disposal of nuclear wastes within the state?\n(A) The commerce clause. (B) The equal protection clause of the Fourteenth Amendment. (C) The privileges and immunities clause of Article IV, Section 2. (D) The contract clause.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The commerce clause states that Congress shall have the power to regulate commerce with foreign Nations, and among the several States, and with the Indian Tribes. The statute affects inter-state commerce, which brings it into question under the commerce clause. Hence the man's strongest argument should be the commerce clause. The answer is (A).\n\nQ: On October 1, 1980, a developer, owner of several hundred acres in a rural county, drafted a general development plan for the area. The duly recorded plan imposed elaborate limitations and restrictions upon the land in the plan, which was to be developed as a residential district. The restrictions were to extend to all persons acquiring any of the lots and to their heirs, assigns, and lessees. It was further provided that all subsequent owners would be charged with due notice of the restrictions. Among those restrictions in the general plan were the following:(22) A franchise right is created in a strip of land 10 feet in width along the rear of each lot for the use of public utility companies with right of ingress and egress. (23) No house or structure of any kind shall be built on the aforementioned strip of land running through the said blocks.
In 2000, a retiree purchased one of the lots, built a house, and erected a fence in the rear of his property within the restricted area. In 2004, a teacher purchased a lot adjacent to the retiree's property and built a new house. Two years later, a librarian purchased the lot that adjoined the teacher's property. The three deeds to those properties each contained references to the deed book where the general plan was recorded. In 2008, the librarian began the construction of a seven-foot post-and-rail fence along the line dividing his lot with the teacher's, and along the center of the area subject to the franchise right. Although the teacher objected to its construction, the fence was completed. If the teacher seeks a mandatory injunction to compel removal of the librarian's fence, the court will most likely\n(A) grant relief, because the fence was in violation of the easement restriction. (B) grant relief, because the encroachment of the fence violated the restriction in the original plan. (C) deny relief, because the teacher failed to enforce the restriction against the retiree. (D) deny relief, because the fence would not be construed as \"a structure\" within the terms of the restriction. \nA: Let's think step by step. We refer to Wikipedia articles on law for help. The restrictions in the original plan say no house or structure of any kind shall be built on the aforementioned strip of land running through the said blocks. Hence the court will most likely grant relief because the fence violated the restriction in the original plan. The answer is (B).\n\nQ: Judge took judicial notice of some facts at the beginning of the trial. Which of the following is not an appropriate kind of fact for judicial notice?\n(A) Indisputable facts. (B) Facts that have been asserted by individual political organizations. (C) Facts recognized to be true by common knowledge. (D) Facts capable of scientific verification.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. Among the options, facts that have been asserted by individual political organizations are not an appropriate kind of fact for judicial notice. The answer is (B).\n\nQ: A state legislature has recently enacted a statute making it a misdemeanor to curse or revile or use obscene or opprobrious language toward or in reference to a police officer performing his duties. A student at a state university organized a demonstration on campus to protest the war. The rally was attended by a group of 50 students who shouted anti-war messages at cars passing by. To show his contempt for the United States, the student sewed the American flag to the rear of his jeans. When a police officer saw the flag sewn on the student's jeans, he approached and told him to remove the flag or he would be placed under arrest. The student became angered and shouted at the police officer, \"Listen, you bastard, I'll wear this rag anywhere I please. \" The student was subsequently placed under arrest and charged with violating the state statute. The student subsequently brings suit in state court challenging the constitutionality of the statute. The strongest constitutional argument for the student is that\n(A) the statute is void for vagueness under the Fourteenth Amendment's due process clause. (B) the statute is invalid because it violates the petitioner's freedom of speech under the First Amendment.
(C) the statute is an abridgment of freedom of speech under the First Amendment because less restrictive means are available for achieving the same purpose. (D) the statute is overbroad and consequently invalid under the First and Fourteenth Amendments.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The Fourteenth Amendment further supports the First Amendment by establishing a due process clause. Hence the strongest argument should be that the statute is overbroad and consequently invalid under the First and Fourteenth Amendments. The answer is (D).", "professional_medicine": "The following are multiple choice questions (with answers) about professional medicine.\n\nQ: A 22-year-old male marathon runner presents to the office with the complaint of right-sided rib pain when he runs long distances. Physical examination reveals normal heart and lung findings and an exhalation dysfunction at ribs\u00a04-5 on the right. Which of the following muscles or muscle groups will be most useful in correcting this dysfunction utilizing a direct method?\n(A) anterior scalene (B) latissimus dorsi (C) pectoralis minor (D) quadratus lumborum\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Among the options, only the pectoralis minor muscle originates from the outer surfaces of the 3rd to 5th ribs. The answer is (C).\n\nQ: A 36-year-old male presents to the office with a\u00a03-week\u00a0history of low back pain. He denies any recent trauma but says that he climbs in and out of his truck numerous times a day for his job. Examination of the patient in the prone position reveals a deep sacral sulcus on the left, a posterior inferior lateral angle on the right, and a lumbosacral junction that springs freely on compression. The most likely diagnosis is\n(A) left-on-left sacral torsion (B) left-on-right sacral torsion (C) right unilateral sacral flexion (D) right-on-right sacral torsion\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. A deep sulcus on the left and a posterior ILA on the right, with a negative spring test, suggest a right-on-right sacral torsion. All other options have a deep sulcus on the right. The answer is (D).\n\nQ: A 44-year-old man comes to the office because of a 3-day history of sore throat, nonproductive cough, runny nose, and frontal headache. He says the headache is worse in the morning and ibuprofen does provide some relief. He has not had shortness of breath. Medical history is unremarkable. He takes no medications other than the ibuprofen for pain. Vital signs are temperature 37.4\u00b0C (99.4\u00b0F), pulse 88/min, respirations 18/min, and blood pressure 120/84 mm Hg. Examination of the nares shows erythematous mucous membranes. Examination of the throat shows erythema and follicular lymphoid hyperplasia on the posterior oropharynx. There is no palpable cervical adenopathy. Lungs are clear to auscultation. Which of the following is the most likely cause of this patient's symptoms?\n(A) Allergic rhinitis (B) Epstein-Barr virus (C) Mycoplasma pneumonia (D) Rhinovirus\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The symptoms, especially the headache, suggest that the most likely cause is Rhinovirus. Epstein-Barr virus would cause swollen lymph nodes, but there is no palpable cervical adenopathy. That the lungs are clear to auscultation suggests it is not Mycoplasma pneumonia.
The answer is (D).\n\nQ: A previously healthy 32-year-old woman comes to the physician 8 months after her husband was killed in a car crash. Since that time, she has had a decreased appetite and difficulty falling asleep. She states that she is often sad and cries frequently. She has been rechecking the door lock five times before leaving her house and has to count exactly five pieces of toilet paper before she uses it. She says that she has always been a perfectionist but these urges and rituals are new. Pharmacotherapy should be targeted to which of the following neurotransmitters?\n(A) Dopamine (B) Glutamate (C) Norepinephrine (D) Serotonin\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The patient feels sad, and among the options, only Dopamine and Serotonin can help increase positive emotions. Serotonin also affects digestion and metabolism, which can help the patient's decreased appetite and sleep difficulty. The answer is (D).\n\nQ: A 42-year-old man comes to the office for preoperative evaluation prior to undergoing adrenalectomy scheduled in 2 weeks. One month ago, he received care in the emergency department for pain over his right flank following a motor vehicle collision. At that time, blood pressure was 160/100 mm Hg and CT scan of the abdomen showed an incidental 10-cm left adrenal mass. Results of laboratory studies, including complete blood count, serum electrolyte concentrations, and liver function tests, were within the reference ranges. The patient otherwise had been healthy and had never been told that he had elevated blood pressure. He takes no medications. A follow-up visit in the office 2 weeks ago disclosed elevated urinary normetanephrine and metanephrine and plasma aldosterone concentrations. The patient was referred to a surgeon, who recommended the adrenalectomy. Today, vital signs are temperature 36.6\u00b0C (97.9\u00b0F), pulse 100/min, respirations 14/min, and blood pressure 170/95 mm Hg. Physical examination discloses no significant findings. Initial preoperative preparation should include treatment with which of the following?\n(A) Labetalol (B) A loading dose of potassium chloride (C) Nifedipine (D) Phenoxybenzamine\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The symptoms and the adrenal mass suggest pheochromocytoma, and the blood pressure indicates hypertension. Phenoxybenzamine is used to treat hypertension caused by pheochromocytoma. The answer is (D).", "professional_psychology": "The following are multiple choice questions (with answers) about professional psychology.\n\nQ: In the construction of a multiple regression equation for purposes of prediction, the optimal combination of measures is one in which the predictors\n(A) are uncorrelated with each other but are moderately correlated with the criterion (B) have low correlations with each other and low correlations with the criterion (C) are highly intercorrelated with each other and moderately correlated with the criterion (D) have low correlations with the criterion but are moderately correlated with each other\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The basis of multiple regression is to assess the relationship between one continuous variable and a set of independent variables. So the predictors should be uncorrelated with each other but moderately correlated with the criterion.
The answer is (A).\n\nQ: There are three ways to measure the Central Tendency: the Mean, the Median and the Mode. From your knowledge about them, what is the mode?\n(A) less sensitive to extreme scores than the mean (B) more useful for skewed distributions (C) sensitive to extreme values and highly skewed distributions (D) the most frequently occurring number\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The mode is defined as the most frequently occurring number. The answer is (D).\n\nQ: Carl Jung believed that a client's transference:\n(A) is a fantasy that distracts the client from reality. (B) represents \u201cmixed feelings\u201d toward the therapist. (C) \"is a form of \"\"acting out.\"\"\" (D) reflects the client\u2019s personal and collective unconscious.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Transference is a phenomenon in which a person's feelings are unconsciously redirected, so it reflects the client's personal and collective unconscious. The answer is (D).\n\nQ: In terms of Hofstede\u2019s (1980) five cultural dimensions, the United States scores at the top of the scale on:\n(A) individualism. (B) individualism and power distance. (C) power distance and masculinity. (D) uncertainty avoidance.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The US scores highest on individualism among the five cultural dimensions. The answer is (A).\n\nQ: One of your therapy clients asks your advice about a good weight-reduction program. You have investigated the programs in the community and are enrolled in the one you consider the best. This program offers a $50 bonus to its patrons for each new person they bring into the program. Under these circumstances, your most appropriate response would be to\n(A) tell your client the pros and cons of each program you know about except for the one in which you are enrolled (B) recommend to your client the program in which you are enrolled and explain the $50 bonus you will receive (C) recommend to your client the program in which you are enrolled and offer to have the $50 bonus credited to your client's account in the program (D) tell your client the pros and cons of each program you know about, but do not claim the $50 bonus if your client enrolls in your program\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Based on the circumstances, you should tell your client about the pros and cons of each program, but it would be inappropriate to receive the bonus, so you should not claim the $50 bonus. The answer is (D).", "public_relations": "The following are multiple choice questions (with answers) about public relations.\n\nQ: Earth Hour was a campaign launched by which organization?\n(A) Greenpeace (B) The UN (C) Oxfam (D) World Wildlife Fund\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. Earth Hour is a worldwide movement organized and launched by the World Wildlife Fund. The answer is (D).\n\nQ: In issues management, what is the most proactive approach to addressing negative or misleading information posted online about your organization?\n(A) Buy domain names that could be used by opposition groups. (B) Post anonymous comments on blogs to combat this information. (C) Prepare a news release that discredits the inaccurate information. (D) Make policy changes to address complaints highlighted on these sites.\nA: Let's think step by step.
We refer to Wikipedia articles on public relations for help. In issues management, the most proactive approach to addressing negative or misleading information posted online is to make policy changes to address complaints highlighted on those sites. The answer is (D).\n\nQ: At which stage in the planning process would a situation analysis be carried out?\n(A) Defining the program (B) Planning the program (C) Taking action and implementing ideas (D) Evaluation of the program\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. Situation analyses are typically carried out during the planning process stage of defining the program. The answer is (A).\n\nQ: Which of these statements is true of the Vatican in 2010 at the time of the accusations of child abuse cover-ups?\n(A) There was a coordinated media response. (B) Consistent messages were communicated. (C) Criticisms were taken as attacks on the Catholic Church. (D) The credibility of the Vatican was upheld.\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. In 2010 when there were accusations of child abuse cover-ups, the Vatican took those criticisms as attacks on the Catholic Church. The answer is (C).\n\nQ: What should a public relations media practitioner do if she does not know the answer to a reporter's question?\n(A) Give the reporter other information she is certain is correct. (B) Say that the information is 'off the record' and will be disseminated later. (C) Say 'I don't know' and promise to provide the information later. (D) Say 'no comment,' rather than appear uninformed.\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. If a public relations media practitioner does not know the answer to a reporter's question, they should say 'I don't know' and offer to provide the information later. The answer is (C).", "security_studies": "The following are multiple choice questions (with answers) about security studies.\n\nQ: What are the frameworks of analysis within which terrorism has been considered (as of 2020)?\n(A) Competition between larger nations has resulted in some countries actively supporting terrorist groups to undermine the strength of rival states. Terrorist networks are extended patronage clubs maintained and paid for by their donor states and are conceptualised as being like state actors, to be dealt with using military force. (B) Globalization has enabled the internationalization of terrorist activities by opening up their operational space, although coordination is still managed from a geographical base. This suggests that terrorist groups are nationally structured which means that terrorism cannot be considered in terms of a war to be defeated militarily without having serious implications on the indigenous population. (C) Terrorism can be viewed as a problem to be resolved by military means (war on terrorism), by normal police techniques (terrorism as crime), or as a medical problem with underlying causes and symptoms (terrorism as disease). (D) Terrorism is viewed as a criminal problem. The criminalization of terrorism has two important implications. Firstly, it suggests that terrorism can be eradicated - terrorists can be caught and brought to trial by normal judicial proceedings thereby removing the threat from society - and secondly, it suggests that preventative crime techniques are applicable to prevent its development.\nA: Let's think step by step. 
We refer to Wikipedia articles on security studies for help. (A) is wrong because it is not competition between larger nations that causes terrorism. \n(B) is wrong because globalization is not the cause of terrorism.\n(C) is correct because the US undertook the war on terrorism. \n(D) is wrong because preventative crime techniques will likely not end terrorism. The answer is (C).\n\nQ: Which of the following is the best lens through which to investigate the role of child soldiers?\n(A) Child soldiers are victims of combat that need re-education and rehabilitation. (B) Children and their mothers are not active subjects in warfare and are best considered as subjects in the private sphere. (C) Children are most often innocent bystanders in war and are best used as signifiers of peace. (D) Children have political subjecthood that is missed when they are considered as passive victims of warfare.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. The political subjecthood of child soldiers can be missed when they are considered passive victims of warfare. The answer is (D).\n\nQ: How can we best describe the relationship between the state-centric approach and the concept of human security?\n(A) There are such wide divisions within the human security framework regarding the nature of threats and referent objects that no widely applicable comparisons between state-centric approaches and human security can be drawn. (B) By adopting the framework of human security, the limitations of the realist state-centric approach become evident. Whilst human security defines the referent object as the person or population, state-centric approaches prioritise the security of the state, de-prioritizing the pursuit of human security. (C) The state-centric approach to security is a faction of human security, usually defined within the broad school of human security. By being state-centric this approach prioritises the individual as the referent object in security studies. (D) Both the state-centric and human-centric approaches to security are mutually exclusive and offer a sufficient analytic framework with which to understand the international security system. It is therefore the role of security analysts to determine which of these substantial concepts is correct, and which should be discarded.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Human security focuses on a person or population whereas state-centric approaches focus on the state while deprioritizing human security. The answer is (B).\n\nQ: In order to become securitized, a threat must be presented in which of these ways?\n(A) As an existential threat that requires immediate and extraordinary action, posing a threat to the survival of the state or to societal security. (B) As requiring immediate and extraordinary action by the state, threatening the survival of a referent object and therefore warranting the use of measures not normally employed in the political realm. (C) As an urgent threat to the survival of the referent object, so serious that it legitimises the employment of extraordinary action in response. (D) As an urgent threat to the survival of the audience that requires extraordinary or emergency measures.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. To be securitized, a threat must be an urgent threat to the survival of the referent object. 
The answer is (C).\n\nQ: What distinguishes coercive diplomacy from military force?\n(A) Compellence is another term for coercive diplomacy, but covering a narrower set of criteria; compellence covers those threats aimed at initiating adversary action. A threat to coerce a state to give up part of its territory would count as coercive diplomacy, as long as that threat proactively initiates action before reactive diplomacy is taken. (B) Coercive diplomacy constitutes the threats of limited force to induce adversary's incentive to comply with the coercer's demands. It is an influence strategy that is intended to obtain compliance: the use of force to defeat an opponent first does not count. It leaves an element of choice with the target to comply, or to continue. (C) Military force, or the threat of military force, utilises fear to achieve strategic objectives. Coercive diplomacy is differentiated from this approach, because it does not use fear as a tool for coercing an adversary. (D) Coercive diplomacy is employed to use force but to limit its effects on the international community. Coercive diplomacy is an aggressive strategy that is intended to obtain compliance through defeat. It does not leave an element of choice with the target, the target either being forced to comply or engage in conflict. It seeks to control by imposing compliance by removing any opportunity for negotiation or concession.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Coercive diplomacy uses the threat of force to induce the opponent to comply with demands. The answer is (B).", "sociology": "The following are multiple choice questions (with answers) about sociology.\n\nQ: Which of the following is not a problem associated with official statistics on strike action?\n(A) most strikes go unnoticed by employers and the mass media (B) not all industrial disputes will be reported by the employer (C) the definition of strikes excludes those that involve fewer than ten workers or last less than one day (D) it is hard to compare strikes that were measured in different ways\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Official statistics on strike action can be problematic because not all industrial disputes will be reported by employers, the definition of strikes excludes those that involve fewer than ten workers or last less than one day, and it is hard to compare strikes that were measured in different ways. Thus, (A) is not a problem associated with official statistics on strike action. The answer is (A).\n\nQ: What does Berger (1963) describe as a metaphor for social reality?\n(A) a fairground ride (B) a circus (C) a puppet theatre (D) a ballet\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Berger describes social reality using the metaphor of a puppet theatre. The answer is (C).\n\nQ: The term 'hegemony' refers to:\n(A) the tendency for the working class not to realize their own interests (B) a dominant ideology that legitimates economic, political and cultural power (C) a form of dual consciousness based on ideology and everyday experiences (D) a mode of payment given for outstanding topiary\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Hegemony refers to a dominant ideology that legitimates economic, political, and cultural power. 
The answer is (B).\n\nQ: The shift from 'civil religion' to 'common religion' means that:\n(A) the increasing bureaucracy of the state has made religion only a marginal part of our lives (B) despite the weakening of traditional authority, our everyday lives and 'common sense' remain shaped by religious beliefs and values (C) religious participation in collective worship may have declined, but people still practise their faiths in private (D) people are much more likely to discuss their religious beliefs in public, informal settings\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. The shift from civil religion to common religion means that despite the weakening of traditional authority, our everyday lives and common sense remain shaped by religious beliefs and values. The answer is (B).\n\nQ: Which of the following did the post-war welfare state of 1948 not aim to provide:\n(A) free health care and education for all (B) a minimum wage (C) full employment (D) universal welfare\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. The post-war welfare state of 1948 aimed to provide free healthcare and education, full employment, and universal welfare. But it did not aim to provide a minimum wage. The answer is (B).", "us_foreign_policy": "The following are multiple choice questions (with answers) about us foreign policy.\n\nQ: How did Donald Trump attack globalization in the 2016 campaign?\n(A) Globalization had made men like him too rich (B) Globalization only benefited certain American states, such as New York (C) Liberal elites had encouraged globalization, while 'ordinary Americans' lost jobs because of it (D) Globalization encouraged damaging trade wars\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. Trump attacked globalization because he believed ordinary Americans lost jobs due to it, and so he wanted to blame liberals who had encouraged it. The answer is (C).\n\nQ: How did NSC-68 change U.S. strategy?\n(A) It globalized containment. (B) It militarized containment. (C) It called for the development of the hydrogen bomb. (D) All of the above\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. NSC-68 outlined a variety of courses of action, including globalization of containment, militarization of containment, and the development of the hydrogen bomb. The answer is (D).\n\nQ: How do Defensive Realism and Offensive Realism differ in their explanation of state behaviour?\n(A) Defensive realists place greater emphasis on the role of international institutions (B) Defensive realists place less emphasis on geographical factors (C) Offensive realists give more priority to the national interest than Defensive realists. (D) Defensive realists believe states are security maximizers, while Offensive realists believe states to be power maximizers\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. While defensive realism advocates that states are security maximizers, offensive realists think of states as power maximizers. The answer is (D).\n\nQ: The realm of policy decisions concerned primarily with relations between the United States and the rest of the world is known as\n(A) terrorism policy. (B) economic policy. (C) foreign policy. (D) international policy.\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. 
The realm of policy decisions concerned primarily with relations between the US and the rest of the world is known as foreign policy. The answer is (C).\n\nQ: How did the 2008 financial crisis affect America's international reputation?\n(A) It damaged support for the US model of political economy and capitalism (B) It created anger at the United States for exaggerating the crisis (C) It increased support for American global leadership under President Obama (D) It reduced global use of the US dollar\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. The 2008 financial crisis damaged the international reputation of the American model of political economy and capitalism. The answer is (A).", "virology": "The following are multiple choice questions (with answers) about virology.\n\nQ: The median survival time to AIDS and death was established by following:\n(A) Seroprevalent HIV-infected individuals (B) Seronegatives (C) Seroconverters (D) High-risk seronegatives\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The median survival time to AIDS and death was established by following seroconverters. The answer is (C).\n\nQ: Which of the following is a morphological characteristic of the paramyxoviruses.\n(A) Fragile viruses often visualised with RNA spewing from the inside (B) Elongate viruses (C) Icosahedral viruses with envelope (D) Very large viruses\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. Paramyxoviruses are fragile viruses often visualised with RNA spewing from the inside. The answer is (A).\n\nQ: The most important goal of a behavioral intervention is:\n(A) Change in behavior (B) Comprehensive coverage (C) Effective use of behavioral theory (D) Sustained behavior change\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The primary goal of a behavioral intervention is to cause sustained behavior change. The answer is (D).\n\nQ: A key factor facilitating the application of nested case-control studies from the MACS was:\n(A) Data collection (B) Establishment of a repository of biologic specimens (C) Participant interest (D) Administration of the questionnaire by staff\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The Multicenter AIDS Cohort Study's use of nested case-control studies was facilitated by the establishment of a repository of biologic specimens. The answer is (B).\n\nQ: Why are parvoviruses a highly impactful parasite?\n(A) Because they have no nucleic acid (B) They require a helper virus (C) Only replicate in dividing cells (D) Can integrate into host chromosomes\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. Parvoviruses are highly impactful because they do not have nucleic acid. The answer is (A).", "world_religions": "The following are multiple choice questions (with answers) about world religions.\n\nQ: How can the Upanishads be characterized?\n(A) Ritual texts (B) Philosophical texts (C) Hymns (D) Origin stories\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The Upanishads are the most recent part of the Vedas (the oldest scriptures in Hinduism) and supplied the basis of later Hindu philosophy. So they are philosophical texts. The answer is (B).\n\nQ: What is the Second Gem in Buddhism?\n(A) The Dharma (B) The Sangha (C) The Buddha (D) The Bodhisattva\nA: Let's think step by step. 
We refer to Wikipedia articles on world religions for help. The Second Gem in Buddhism is The Dharma. The answer is (A).\n\nQ: Which Japanese government promoted a kind of national cult based on the emperor and his associations with kami?\n(A) Honen (B) Tanaka (C) Tokugawa (D) Meiji\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The promotion of a national cult based on the emperor and his associations with Kami happened during the reign of Emperor Meiji (1852-1912). The answer is (D).\n\nQ: In which dynasty was the \"Mandate of Heaven\" developed to legitimatize the new rulers?\n(A) Shang (B) Zhou (C) Han (D) Xia\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The \"Mandate of Heaven\" was developed as an ancient Chinese philosophical concept during the Zhou Dynasty (1046-256 BCE). The answer is (B).\n\nQ: What is the sign of the covenant for Jewish males?\n(A) The rainbow (B) Circumcision (C) A son (D) Bar mitzvah\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. In Judaism, the most distinctive sign of the covenant is circumcision (brit milah). The answer is (B)."} \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/gen_all_splits.py b/lm_eval/tasks/mmlu/_generate_configs.py similarity index 76% rename from lm_eval/tasks/mmlu/gen_all_splits.py rename to lm_eval/tasks/mmlu/_generate_configs.py index f6f7d96e..db0e20cd 100644 --- a/lm_eval/tasks/mmlu/gen_all_splits.py +++ b/lm_eval/tasks/mmlu/_generate_configs.py @@ -11,7 +11,7 @@ from lm_eval import utils from lm_eval.logger import eval_logger SUBJECTS = [ - # "abstract_algebra", + "abstract_algebra", "anatomy", "astronomy", "business_ethics", @@ -73,11 +73,14 @@ SUBJECTS = [ def parse_args(): parser = argparse.ArgumentParser() - # parser.add_argument("--benchmark_name", required=True) parser.add_argument("--base_yaml_path", required=True) parser.add_argument( - "--task_save_path", default="lm_eval/tasks/mmlu/hendrycks_test_original" + "--save_prefix_path", default="flan" ) + parser.add_argument( + "--cot_prompt_path", default=None + ) + parser.add_argument("--task_prefix", default="") return parser.parse_args() @@ -91,16 +94,25 @@ if __name__ == "__main__": base_yaml = yaml.full_load(f) print(base_yaml) + if args.cot_prompt_path is not None: + import json + with open(args.cot_prompt_path) as f: + cot_file = json.load(f) + for subject in tqdm(SUBJECTS): + if args.cot_prompt_path is not None: + description = cot_file[subject] + else: + description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n" yaml_dict = { "include": base_yaml_name, - "task": base_yaml["task"].strip("abstract_algebra") + "subject", + "task": f"mmlu_{args.task_prefix}_{subject}" if args.task_prefix else f"mmlu_{subject}", "dataset_name": subject, - "description": f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n", + "description": description, } - file_save_path = args.task_save_path + f"_{subject}.yaml" + file_save_path = args.save_prefix_path + f"_{subject}.yaml" eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") with open(file_save_path, "w") as yaml_file: yaml.dump(yaml_dict, yaml_file) diff --git a/lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml b/lm_eval/tasks/mmlu/default/hendrycks_test_original_default.yaml similarity index 100% rename from lm_eval/tasks/mmlu/hendrycks_test_original_default.yaml rename to 
lm_eval/tasks/mmlu/default/hendrycks_test_original_default.yaml diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml new file mode 100644 index 00000000..64587d54 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml @@ -0,0 +1,25 @@ +group: mmlu_flan_cot_fewshot +dataset_path: cais/mmlu +validation_split: validation +fewshot_split: dev +doc_to_text: "\n\nQ: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step." +fewshot_delimiter: "" +output_type: greedy_until +doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(.)" + - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_abstract_algebra.yaml new file mode 100644 index 00000000..5c549591 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_abstract_algebra.yaml @@ -0,0 +1,40 @@ +dataset_name: abstract_algebra +description: "The following are multiple choice questions (with answers) about abstract\ + \ algebra.\n\nQ: Statement 1 | Every element of a group generates a cyclic subgroup\ + \ of the group. Statement 2 | The symmetric group S_10 has 10 elements.\n(A) True,\ + \ True (B) False, False (C) True, False (D) False, True\nA: Let's think step by\ + \ step. A cyclic group is a group that is generated by a single element. Hence a\ + \ subgroup generated by a single element of a group is cyclic and Statement 1 is\ + \ True. The symmetric group $S_n$ has $n!$ elements, hence it is not true that\ + \ $S_{10}$ has 10 elements, so Statement 2 is False. The answer is (C).\n\nQ: Find\ + \ the characteristic of the ring 2Z.\n(A) 0 (B) 3 (C) 12 (D) 30\nA: Let's think\ + \ step by step. The characteristic of a ring R is $n$ if the statement $ka = 0$\ + \ for all $a\\in 2Z$ implies that $k$ is a multiple of $n$. Assume that $ka = 0$\ + \ for all $a\\in 2Z$ for some $k$. In particular $2k = 0$. Hence $k=0$ and $n=0$.\ + \ The answer is (A).\n\nQ: Statement 1| Every function from a finite set onto itself\ + \ must be one to one. Statement 2 | Every subgroup of an abelian group is abelian.\n\
 (A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think\ + \ step by step. Statement 1 is true. Let $S$ be a finite set. If $f:S \\rightarrow S$\ + \ is an onto function, then $|S| = |f(S)|$. If $f$ were not one to one, then for\ + \ the finite domain $S$ the image would have fewer than $|S|$ elements, a contradiction.\n\
 Statement 2 is true. Let $G$ be an abelian group and $H$ be a subgroup of $G$. Let\ + \ $a,b \\in H$. Then $a,b \\in G$, and since $G$ is abelian, $ab=ba$. Therefore\ + \ $H$ is abelian. The answer is (A).\n\nQ: Statement 1 | If aH is an element\ + \ of a factor group, then |aH| divides |a|. Statement 2 | If H and K are subgroups\ + \ of G then HK is a subgroup of G.\n(A) True, True (B) False, False (C) True, False\ + \ (D) False, True\nA: Let's think step by step. 
Statement 2 is false. Let $H$ be\ + \ a subgroup of $S_3$ generated by the cycle $(1,2)$ and $K$ be a subgroup of $S_3$\ + \ generated by the cycle $(1,3)$. Both $H$ and $K$ have two elements, the generators\ + \ and the identity. However $HK$ contains the cycles (1,2), (1,3) and (2,3,1), but the\ + \ inverse of (2,3,1) is (2,1,3) and it does not belong to HK, hence HK is not a\ + \ subgroup. The answer is (B).\n\nQ: Find all c in Z_3 such that Z_3[x]/(x^2 + c)\ + \ is a field.\n(A) 0 (B) 1 (C) 2 (D) 3\nA: Let's think step by step. Z_3[x]/(x^2\ + \ + c) is a field if and only if x^2 + c does not have roots in Z_3. That is x^2\ + \ + c != 0 for every x in Z_3. If c = 0, then x^2 + c = x^2 has root 0. If c = 1,\ + \ then x^2 + 1 = 1 for x = 0, x^2 + 1 = 2 for x = 1, and x^2 + 1 = 2 for\ + \ x = 2, hence x^2 + 1 does not have any roots. For c = 2 the polynomial x^2 + 2\ + \ has two roots at x = 1 and x = 2. Hence Z_3[x]/(x^2 + c) is a field if and only\ + \ if c = 1. The answer is (B)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_abstract_algebra diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_anatomy.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_anatomy.yaml new file mode 100644 index 00000000..28ca1c4c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_anatomy.yaml @@ -0,0 +1,57 @@ +dataset_name: anatomy +description: "The following are multiple choice questions (with answers) about anatomy.\n\ + \nQ: Which of the following is the body cavity that contains the pituitary gland?\n\ + (A) Abdominal (B) Cranial (C) Pleural (D) Spinal\nA: Let's think step by step. We\ + \ refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem\ + \ step by step. The pituitary gland is the major endocrine gland attached to the\ + \ base of the brain, and it is contained in the Cranial cavity. The answer is (B).\n\ + \nQ: Which of these branches of the trigeminal nerve contain somatic motor processes?\n\ + (A) The supraorbital nerve (B) The infraorbital nerve (C) The mental nerve (D) None\ + \ of the above\nA: Let's think step by step. We refer to Wikipedia articles on anatomy\ + \ for help. Let\u2019s solve this problem step by step. \nWe know the following:\ + \ (A) The supraorbital nerve (also known as the frontal nerve) is the largest branch\ + \ of the ophthalmic nerve and a branch of the ophthalmic division of the trigeminal nerve.\ + \ (B) The infraorbital nerve is a branch of the maxillary division of the trigeminal\ + \ nerve. (C) The mental nerve is a branch of the mandibular division of the trigeminal\ + \ nerve. All these nerves are purely sensory nerves and do not contain any\ + \ somatic motor processes. Therefore, the answer should be none of the above, which\ + \ is (D). The answer is (D).\n\nQ: In Angle's Class II Div 2 occlusion there is\n\ + (A) excess overbite of the upper lateral incisors. (B) negative overjet of the upper\ + \ central incisors. (C) excess overjet of the upper lateral incisors. (D) excess\ + \ overjet of the upper central incisors.\nA: Let's think step by step. We refer\ + \ to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step\ + \ by step. This is a question related to anatomy and orthodontics. 
Excess overjet\ + \ is associated with Class II occlusions; therefore, we can safely eliminate (B)\ + \ from the list, as negative overjet is often associated with Class III occlusions.\ + \ Now, we need to determine the location of the excess overjet, and that would be\ + \ the upper (maxillary) lateral incisors. Only (C) has the correct information.\ + \ The answer is (C).\n\nQ: The pleura\n(A) have no sensory innervation. (B) are\ + \ separated by a 2 mm space. (C) extend into the neck. (D) are composed of respiratory\ + \ epithelium.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy\ + \ for help. Let\u2019s solve this problem step by step. First, recall that the pleura\ + \ refers to the thin layer of tissue that covers the lungs and lines the interior\ + \ wall of the chest cavity. Now, let\u2019s look at each option:\nOption (A): \u201C\ + The pleura have no sensory innervation.\u201D This information is not correct. The\ + \ pleura do have a sensory innervation.\nOption (B): \u201CThe pleura are separated\ + \ by a 2 mm space.\u201D This information is not correct. There is a very thin \u201C\ + potential\u201D space between the layers of the pleura; however, it is typically\ + \ filled with serous pleural fluid. \nOption (C): \u201CThe pleura extend into the\ + \ neck.\u201D This information is actually true. The cervical pleura, also known\ + \ as the dome of the pleura, lines the extension of the pleural cavity into the\ + \ neck.\nOption (D): \u201CThe pleura are composed of respiratory\ + \ epithelium.\u201D This information is not correct. The pleura are composed of\ + \ connective tissue (CT).\nBecause (A), (B), and (D) are all incorrect, (C) is the\ + \ only correct answer. The answer is (C).\n\nQ: What is the embryological origin\ + \ of the hyoid bone?\n(A) The first pharyngeal arch (B) The first and second pharyngeal\ + \ arches (C) The second pharyngeal arch (D) The second and third pharyngeal arches\n\ + A: Let's think step by step. We refer to Wikipedia articles on anatomy for help.\ + \ Let\u2019s solve this problem step by step. The hyoid bone is a small U-shaped\ + \ bone located in the anterior neck. In its resting position, it lies between the\ + \ base of the mandible and the third cervical vertebra. We know that the second\ + \ and the third pharyngeal arches give rise to the horns of the hyoid bone; therefore,\ + \ the embryological origin of the hyoid bone is the second and the third pharyngeal\ + \ arches\u2014this information is covered in the last option (D). Therefore, we\ + \ conclude that (D) must be the correct answer.\ + \ The answer is (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_anatomy diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_astronomy.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_astronomy.yaml new file mode 100644 index 00000000..cd50fd55 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_astronomy.yaml @@ -0,0 +1,54 @@ +dataset_name: astronomy +description: "The following are multiple choice questions (with answers) about astronomy.\n\ + \nQ: Where do most short-period comets come from and how do we know?\n(A) The Kuiper\ + \ belt; short period comets tend to be in the plane of the solar system just like\ + \ the Kuiper belt. 
(B) The Kuiper belt; short period comets tend to come from random\ + \ directions indicating a spherical distribution of comets called the Kuiper belt.\ + \ (C) The asteroid belt; short period comets have orbital periods similar to asteroids\ + \ like Vesta and are found in the plane of the solar system just like the asteroid\ + \ belt. (D) The Oort cloud; short period comets tend to be in the plane of the solar\ + \ system just like the Oort cloud.\nA: Let's think step by step. Most short-period\ + \ comets come from the Kuiper belt, and we know because short period comets tend\ + \ to be in the plane of the solar system, just like the Kuiper belt is. The answer\ + \ is (A).\n\nQ: You are pushing a truck along a road. Would it be easier to accelerate\ + \ this truck on Mars? Why? (Assume there is no friction)\n(A) It would be harder\ + \ since the truck is heavier on Mars. (B) It would be easier since the truck is\ + \ lighter on Mars. (C) It would be harder since the truck is lighter on Mars. (D)\ + \ It would be the same no matter where you are.\nA: Let's think step by step. If\ + \ we assume that there is no friction, the force needed to accelerate the truck\ + \ is by Newton\u2019s second law only dependent on the mass of the truck. Hence\ + \ (A), (B) and (C) are incorrect since it doesn\u2019t matter that it\u2019s on\ + \ Mars, and (D) is the correct answer. The answer is (D).\n\nQ: Say the pupil of\ + \ your eye has a diameter of 5 mm and you have a telescope with an aperture of 50\ + \ cm. How much more light can the telescope gather than your eye?\n(A) 10000 times\ + \ more (B) 100 times more (C) 1000 times more (D) 10 times more\nA: Let's think\ + \ step by step. The amount of light is proportional to the aperture area $A = \\\
 pi D^2/4$ for a lens with diameter $D$, so the relative amounts of light between\ + \ the eye with diameter 5mm and the telescope with diameter 50cm is $(50 cm)^2/(5mm)^2\ + \ = 10000$. The answer is (A).\n\nQ: Why isn't there a planet where the asteroid\ + \ belt is located?\n(A) A planet once formed here but it was broken apart by a catastrophic\ + \ collision. (B) There was not enough material in this part of the solar nebula\ + \ to form a planet. (C) There was too much rocky material to form a terrestrial\ + \ planet but not enough gaseous material to form a jovian planet. (D) Resonance\ + \ with Jupiter prevented material from collecting together to form a planet.\nA:\ + \ Let's think step by step. The asteroid belt is a circumstellar disc consisting of a\ + \ large number of asteroids between the orbits of Mars and Jupiter. The asteroids in\ + \ this belt are affected by the gravitational pull from both other asteroids and\ + \ nearby planets. Due to the strong gravitational force of Jupiter there are resonances\ + \ that give rise to low density regions of asteroids known as the Kirkwood gaps.\ + \ So (B) and (C) are not correct since it\u2019s not a lack of material that prevents\ + \ a planet from being formed, and (A) is incorrect because the Kirkwood gaps would\ + \ have prevented a planet from forming in the first place, and (D) is the correct\ + \ option. The answer is (D).\n\nQ: Why is Mars red?\n(A) Because the surface is\ + \ covered with heavily oxidized (\"rusted\") minerals. (B) Because the atmosphere\ + \ scatters more light at bluer wavelengths transmitting mostly red light. (C) Because\ + \ Mars is covered with ancient lava flows which are red in color. 
(D) Because flowing\ + \ water on Mars's surface altered the surface minerals several billion years ago.\n\ + A: Let's think step by step. Option (B) is not correct because if the red color\ + \ was caused by the scattering off the atmosphere, then the earth with a much thicker\ + \ atmosphere would also look red. Options (C) and (D) are not specific enough about\ + \ why the color of the surface would be red, while (A) is correct because it explains\ + \ that the surface is red due to the rusted materials on the surface and the red\ + \ color comes from the rust. So the correct option is (A). The answer is (A)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_astronomy diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_business_ethics.yaml new file mode 100644 index 00000000..60d939a8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_business_ethics.yaml @@ -0,0 +1,55 @@ +dataset_name: business_ethics +description: "The following are multiple choice questions (with answers) about business\ + \ ethics.\n\nQ: In contrast to _______, _______ aim to reward favourable behaviour\ + \ by companies. The success of such campaigns have been heightened through the use\ + \ of ___________, which allow campaigns to facilitate the company in achieving _________\ + \ .\n(A) Buycotts, Boycotts, Blockchain technology, Charitable donations (B) Buycotts,\ + \ Boycotts, Digital technology, Increased Sales (C) Boycotts, Buyalls, Blockchain\ + \ technology, Charitable donations (D) Boycotts, Buycotts, Digital technology, Increased\ + \ Sales\nA: Let's think step by step. We refer to Wikipedia articles on business\ + \ ethics for help. The sentence that best uses the possible options above is \u201C\ + In contrast to *boycotts*, *buycotts* aim to reward favourable behavior by companies.\ + \ The success of such campaigns have been heightened through the use of *digital\ + \ technology*, which allow campaigns to facilitate the company in achieving *increased\ + \ sales*.\u201D The answer is (D).\n\nQ: _______ is the direct attempt to formally\ + \ or informally manage ethical issues or problems, through specific policies, practices\ + \ and programmes.\n(A) Corporate social responsibility (B) Business ethics management\ + \ (C) Sustainability (D) Environmental management\nA: Let's think step by step.\ + \ We refer to Wikipedia articles on business ethics for help. The direct attempt\ + \ to manage ethical issues through specific policies, practices, and programs is\ + \ business ethics management. The answer is (B).\n\nQ: Three contrasting tactics that CSO's\ + \ can engage in to meet their aims are ________ which typically involves research\ + \ and communication, ________, which may involve physically attacking a company's\ + \ operations or ________, often involving some form of _______.\n(A) Non-violent\ + \ direct action, Violent direct action, Indirect action, Boycott (B) Indirect action,\ + \ Instrumental action, Non-violent direct action, Information campaign (C) Indirect\ + \ action, Violent direct action, Non-violent direct-action Boycott (D) Non-violent\ + \ direct action, Instrumental action, Indirect action, Information campaign\nA:\ + \ Let's think step by step. We refer to Wikipedia articles on business ethics for\ + \ help. 
The sentence that best uses the possible options above is \u201CThree contrasting\ + \ tactics that CSO's can engage in to meet their aims are *indirect action*, which\ + \ typically involves research and communication, *violent direct action*, which\ + \ may involve physically attacking a company's operations or *non-violent direct\ + \ action*, often involving some form of *boycott*.\u201D The answer is (C).\n\n\ + Q: To ensure the independence of the non-executive board members, there are a number\ + \ of steps which can be taken, which include non-executives being drawn from _______\ + \ the company, being appointed for a _________ time period as well as being appointed\ + \ _________.\n(A) Outside, Limited, Independently (B) Inside, Limited, Intermittently\ + \ (C) Outside, Unlimited, Intermittently (D) Inside, Unlimited, Independently\n\ + A: Let's think step by step. We refer to Wikipedia articles on business ethics for\ + \ help. The sentence that best uses the possible options above is \u201CTo ensure\ + \ the independence of the non-executive board members, there are a number of steps\ + \ which can be taken, which include non-executives being drawn from *outside* the\ + \ company, being appointed for a *limited* time period as well as being appointed\ + \ *independently*.\u201D The answer is (A).\n\nQ: Beyond the business case for engaging\ + \ in CSR there are a number of moral arguments relating to: negative _______, the\ + \ _______that corporations possess and the ________ of business and society.\n(A)\ + \ Externalities, Power, Independence (B) Publicity, Insubstantial resources, Mutual\ + \ dependence (C) Publicity, Power, Independence (D) Externalities, Power, Mutual\ + \ dependence\nA: Let's think step by step. We refer to Wikipedia articles on business\ + \ ethics for help. The sentence that best uses the possible options above is \u201C\ + Beyond the business case for engaging in CSR there are a number of moral arguments\ + \ relating to: negative *externalities*, the *power* that corporations possess and\ + \ the *mutual dependence* of business and society.\u201D The answer is (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_business_ethics diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_clinical_knowledge.yaml new file mode 100644 index 00000000..a591a7bd --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_clinical_knowledge.yaml @@ -0,0 +1,58 @@ +dataset_name: clinical_knowledge +description: 'The following are multiple choice questions (with answers) about clinical + knowledge. + + + Q: Glycolysis is the name given to the pathway involving the conversion of: + + (A) glycogen to glucose-1-phosphate. (B) glycogen or glucose to fructose. (C) glycogen + or glucose to pyruvate or lactate. (D) glycogen or glucose to pyruvate or acetyl + CoA. + + A: Let''s think step by step. We refer to Wikipedia articles on clinical knowledge + for help. Glycolysis is the name given to the pathway involving conversion of glycogen + or glucose to pyruvate or lactate. The answer is (C). + + + Q: What is the difference between a male and a female catheter? + + (A) Male and female catheters are different colours. (B) Male catheters are longer + than female catheters. (C) Male catheters are bigger than female catheters. (D) + Female catheters are longer than male catheters. + + A: Let''s think step by step. We refer to Wikipedia articles on clinical knowledge + for help. 
The difference between a male and female catheter is that male catheters + tend to be longer than female catheters. The answer is (B). + + + Q: How many attempts should you make to cannulate a patient before passing the job + on to a senior colleague, according to the medical knowledge of 2020? + + (A) 4 (B) 3 (C) 2 (D) 1 + + A: Let''s think step by step. We refer to Wikipedia articles on clinical knowledge + for help. According to the medical protocol as of 2020, you should make two attempts + to cannulate a patient before passing the job on to a more-senior practitioner. + The answer is (C). + + + Q: In the assessment of the hand function which of the following is true? + + (A) Abduction of the thumb is supplied by spinal root T2 (B) Opposition of the thumb + by opponens pollicis is supplied by spinal root T1 (C) Finger adduction is supplied + by the median nerve (D) Finger abduction is mediated by the palmar interossei + + A: Let''s think step by step. We refer to Wikipedia articles on clinical knowledge + for help. Of all the options, it is only true that the opposition of the thumb by + opponens pollicis is supplied by spinal root T1. The answer is (B). + + + Q: The energy for all forms of muscle contraction is provided by: + + (A) ATP. (B) ADP. (C) phosphocreatine. (D) oxidative phosphorylation. + + A: Let''s think step by step. We refer to Wikipedia articles on clinical knowledge + for help. The energy for muscular contraction is provided by ATP (adenosine triphosphate), + which is the cell''s main energy currency. The answer is (A).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_clinical_knowledge diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_biology.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_biology.yaml new file mode 100644 index 00000000..be51794a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_biology.yaml @@ -0,0 +1,60 @@ +dataset_name: college_biology +description: "The following are multiple choice questions (with answers) about college\ + \ biology.\n\nQ: Which of the following represents an accurate statement concerning\ + \ arthropods?\n(A) They possess an exoskeleton composed primarily of peptidoglycan.\ + \ (B) They possess an open circulatory system with a dorsal heart. (C) They are\ + \ members of a biologically unsuccessful phylum incapable of exploiting diverse\ + \ habitats and nutrition sources. (D) They lack paired, jointed appendages.\nA:\ + \ Let's think step by step. Peptidoglycan is known to comprise the cell wall\ + \ of most bacteria, rather than the exoskeleton of arthropods, which is made of\ + \ chitin, which rules out (A). The answer (C) is false because arthropods are a\ + \ highly successful phylum. Likewise, arthropods have paired, jointed appendages,\ + \ which rules out (D). The only remaining option is (B), as arthropods have an open\ + \ circulatory system with a dorsal tubular heart. The answer is (B).\n\nQ: In a\ + \ given population, 1 out of every 400 people has a cancer caused by a completely\ + \ recessive allele, b. Assuming the population is in Hardy-Weinberg equilibrium,\ + \ which of the following is the expected proportion of individuals who carry the\ + \ b allele but are not expected to develop the cancer?\n(A) 1/400 (B) 19/400 (C)\ + \ 20/400 (D) 38/400\nA: Let's think step by step. 
According to the Hardy-Weinberg\ + \ Law, $p^2 + 2 p q + q^2 = 1$, and $p + q = 1$ where $p$ is the frequency of the\ + \ dominant allele, $q$ is the frequency of the recessive allele, and $p^2$, $q^2$,\ + \ and $2pq$ are the frequencies of dominant homozygous, recessive homozygous, and\ + \ heterozygous individuals, respectively. The frequency of the recessive allele\ + \ (q) is $\\sqrt{\\frac{1}{400}} = 0.05$. We have $p = 1 - q = 0.95$. The frequency\ + \ of heterozygous individuals is $2pq = 2 \\cdot 0.05 \\cdot 0.95 = 0.095$. The\ + \ number of heterozygous individuals is equal to the frequency of heterozygous individuals\ + \ times the size of the population, or $0.095 * 400 = 38$. So we end up with 38/400.\ + \ The answer is (D).\n\nQ: According to the pressure-flow model of movement of phloem\ + \ contents, photosynthate movement from source to sink is driven by\n(A) an ATP-dependent\ + \ pressure-flow pump (B) a water-pressure potential gradient (C) transpiration (D)\ + \ apoplastic diffusion\nA: Let's think step by step. It is a gradient in water pressure\ + \ that induces the movement of phloem content, which refers to answer (B). The mechanism\ + \ of movement does not rely on metabolism, which rules out (A). Transpiration refers\ + \ to the exhalation of water vapor through plant stomata, and is also not related,\ + \ which rules out (C). While the apoplastic pathway is one of two main pathways\ + \ for water transport in plants, it is not central to the pressure flow model, which\ + \ rules out (D). The answer is (B).\n\nQ: Which of the following contain DNA sequences\ + \ required for the segregation of chromosomes in mitosis and meiosis?\n(A) Telomeres\ + \ (B) Centromeres (C) Nucleosomes (D) Spliceosomes\nA: Let's think step by step.\ + \ The genetic material in Telomeres is not used, which rules out (A). Nucleosomes\ + \ are the repeating subunit that comprises chromatin packed in a cell nucleus, and\ + \ do not specifically refer to DNA sequences necessary for segregating chromosomes\ + \ in cell division, which rules out (C). A spliceosome is a large ribonucleoprotein\ + \ that removes introns from transcribed pre-mRNA rather than governing chromosome\ + \ segregation. Centromeres are directly responsible for segregating chromosomes\ + \ in cell division. The answer is (B).\n\nQ: The presence of homologous structures\ + \ in two different organisms, such as the humerus in the front limb of a human and\ + \ a bird, indicates that\n(A) the human and bird are polyphyletic species (B) a\ + \ human's and bird's evolution is convergent (C) the human and bird belong to a\ + \ clade (D) the human and bird developed by analogy\nA: Let's think step by step.\ + \ Polyphyletic species are organisms that are grouped due to having similar characteristics\ + \ but which do not have a common ancestor. This is not the case for humans and birds,\ + \ which rules out (A). Convergent evolution refers to the independent development\ + \ of similar features in different species at different periods, which is also not\ + \ the case for humans and birds, which rules out (B). Analogy refers to the superficial\ + \ resemblance of structures that have different origins, which is not the case for\ + \ the human and bird forearms, which rules out (D). Humans and birds do belong to\ + \ the same clade - a group of organisms composed of a common ancestor. The answer\ + \ is (C)." 
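The Hardy-Weinberg arithmetic in the college_biology few-shot answer above is easy to sanity-check numerically. The following is a minimal Python sketch, purely illustrative and not part of the patch; the variable names q, p, and carriers are ours:

import math

# q^2 = 1/400 of the population is homozygous recessive; population size 400.
q = math.sqrt(1 / 400)        # recessive allele frequency -> 0.05
p = 1 - q                     # dominant allele frequency  -> 0.95
carriers = 2 * p * q * 400    # expected heterozygous carriers
print(carriers)               # -> 38.0, i.e. 38/400, matching answer (D)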
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_college_biology diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_chemistry.yaml new file mode 100644 index 00000000..a02c909e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_chemistry.yaml @@ -0,0 +1,37 @@ +dataset_name: college_chemistry +description: "The following are multiple choice questions (with answers) about college\ + \ chemistry.\n\nQ: 3 Cl\u2212(aq) + 4 CrO_4^2\u2212(aq) + 23 H+(aq) \u2192 3 HClO2(aq)\ + \ + 4 Cr3+(aq) + 10 H2O(l). In the reaction shown above, Cl\u2212(aq) behaves as\n\ + (A) an acid (B) a base (C) a catalyst (D) a reducing agent\nA: Let's think step\ + \ by step. A molecule that behaves as a base accepts an H+ ion (or proton) from\ + \ another molecule, whereas a molecule that behaves as an acid donates an H+ ion\ + \ (or proton) to another molecule. Neither of these is the case for Cl in this reaction,\ + \ which rules out (A) and (B). A catalyst is a substance that only accelerates a\ + \ reaction without itself undergoing chemical change, which is not the case here.\ + \ This rules out (C). Instead, the $Cl^{-}$ ions carry a negative charge, which\ + \ they donate in the reaction to form 3 HClO2. This is the behavior of a reducing\ + \ agent, or (D). The answer is (D).\n\nQ: Which of the following statements about\ + \ the lanthanide elements is NOT true?\n(A) The most common oxidation state for\ + \ the lanthanide elements is +3. (B) Lanthanide complexes often have high coordination\ + \ numbers (> 6). (C) All of the lanthanide elements react with aqueous acid to liberate\ + \ hydrogen. (D) The atomic radii of the lanthanide elements increase across the\ + \ period from La to Lu.\nA: Let's think step by step. The atomic radii of the lanthanide\ + \ elements in fact decrease across the period from La to Lu. Options (A), (B), and\ + \ (C) are all true. This means that only (D) is NOT true. The answer is (D).\n\n\ + Q: Which of the following lists the hydrides of group-14 elements in order of thermal\ + \ stability, from lowest to highest?\n(A) PbH4 < SnH4 < GeH4 < SiH4 < CH4 (B) PbH4\ + \ < SnH4 < CH4 < GeH4 < SiH4 (C) CH4 < SiH4 < GeH4 < SnH4 < PbH4 (D) CH4 < PbH4\ + \ < GeH4 < SnH4 < SiH4\nA: Let's think step by step. The thermal stability of group-14\ + \ hydrides decreases as we move from the top of group 14 to the bottom. The order\ + \ of elements in the group from top to bottom is C, Si, Ge, Sn, Pb. Therefore in\ + \ order of increasing thermal stability we have PbH4, SnH4, GeH4, SiH4, and CH4,\ + \ or answer (A). The answer is (A).\n\nQ: Predict the number of lines in the EPR\ + \ spectrum of a solution of 13C-labelled methyl radical (13CH3\u2022), assuming\ + \ the lines do not overlap.\n(A) 4 (B) 3 (C) 6 (D) 24 (E) 8\nA: Let's think step\ + \ by step. The electron paramagnetic resonance spectrum will be split by two forms\ + \ of interactions. The first is the hyperfine interaction with the 13C (nuclear\ + \ spin $I = \\frac{1}{2}$) which will split the spectrum into 2 lines. This will\ + \ be further split into 4 lines by the interaction with three equivalent 1H nuclei.\ + \ The total number of lines is therefore $2 \\cdot 4 = 8$. The answer is (E)." 
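The EPR line count in the college_chemistry few-shot answer follows the standard multiplicity rules: a nucleus of spin I splits each line into 2I + 1 components, and n equivalent spin-1/2 nuclei split each line into n + 1 components. A minimal Python sketch under those assumptions (names are illustrative, not part of the harness):

from fractions import Fraction

I_c13 = Fraction(1, 2)          # 13C nuclear spin
lines = int(2 * I_c13 + 1)      # hyperfine splitting by one 13C -> 2 lines
lines *= 3 + 1                  # three equivalent 1H nuclei     -> x4
print(lines)                    # -> 8, matching answer (E)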
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_college_chemistry diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_computer_science.yaml new file mode 100644 index 00000000..20b398c1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_computer_science.yaml @@ -0,0 +1,189 @@ +dataset_name: college_computer_science +description: 'The following are multiple choice questions (with answers) about college + computer science. + + + Q: Which of the following regular expressions is equivalent to (describes the same + set of strings as) (a* + b)*(c + d)? + + (A) a*(c + d)+ b(c + d) + + (B) a*(c + d)* + b(c + d)* + + (C) a*(c + d)+ b*(c + d) + + (D) (a + b)*c +(a + b)*d + + A: Let''s think step by step. We know that: + + 1. (X* + Y)* = (X + Y)* + + 2. X(Y + Z) = XY + XZ + + Using equation 1 we can rewrite (a* + b)*(c + d) as: + + 3. (a + b)*(c + d) + + Using equation 2 we can rewrite equation 3 as: + + (a + b)*c + (a + b)*d The answer is (D). + + + Q: The Singleton design pattern is used to guarantee that only a single instance + of a class may be instantiated. Which of the following is (are) true of this design + pattern? + + I. The Singleton class has a static factory method to provide its instance. + + II. The Singleton class can be a subclass of another class. + + III. The Singleton class has a private constructor. + + (A) I only + + (B) II only + + (C) III only + + (D) I, II, and III + + A: Let''s think step by step. Statement I is a correct statement about a Singleton, + because a Singleton provides its instance through a static factory method. Statement + II is also correct, because there is no inherent restriction regarding the inheritance + of a Singleton. Statement III is also correct, because a Singleton must be instantiated + only once, so its constructor is made private to prevent any construction except + via its static factory method. + + Given these facts, statements I, II, and III are all correct. The answer is (D). + + + Q: A certain pipelined RISC machine has 8 general-purpose registers R0, R1, . . + . , R7 and supports the following operations: + + ADD Rs1, Rs2, Rd (Add Rs1 to Rs2 and put the sum in Rd) + + MUL Rs1, Rs2, Rd (Multiply Rs1 by Rs2 and put the product in Rd) + + An operation normally takes one cycle; however, an operation takes two cycles if + it produces a result required by the immediately following operation in an operation + sequence. + + Consider the expression AB + ABC + BC, where variables A, B, C are located in registers + R0, R1, R2. If the contents of these three registers must not be modified, what + is the minimum number of clock cycles required for an operation sequence that computes + the value of AB + ABC + BC? + + (A) 5 (B) 6 (C) 7 (D) 8 + + A: Let''s think step by step. First, we are given that A is in R0, B is in R1, and + C is in R2. + + Next, we can see that we must compute three multiplies (AB, BC, and ABC) and two + adds (AB + ABC, (AB + ABC) + BC) to compute our final answer, resulting in a minimum + of five clock cycles. + + Next, we can see that there is no way to avoid at least one pipeline stall when + computing our final answer, because to compute our final sum we must wait at least + one cycle for the results from the previous stage to be ready. Thus, our minimum + number of cycles must be 6. 
+ + We can verify that we can create a solution that requires only six cycles as follows: + + compute AB: MUL R0, R1, R3 + + compute BC: MUL R1, R2, R4 + + compute ABC: MUL R3, R2, R5 + + compute AB + BC: ADD R3, R4, R6 + + STALL + + compute AB + ABC + BC: ADD R5, R6, R7 + + So there are 6 cycles. The answer is (B). + + + Q: A compiler generates code for the following assignment statement. + + G := (A + B) * C - (D + E) * F + + The target machine has a single accumulator and a single-address instruction set + consisting of instructions load, store, add, subtract, and multiply. For the arithmetic + operations, the left operand is taken from the accumulator and the result appears + in the accumulator. The smallest possible number of instructions in the resulting + code is + + (A) 5 (B) 6 (C) 7 (D) 9 + + A: Let''s think step by step. We can compute the final answer with the following + sequence of operations: + + 1. LOAD D (accumulator = D) + + 2. ADD E (accumulator = D+E) + + 3. MUL F (accumulator = (D+E)*F) + + 4. STORE X (X = (D+E)*F) + + 5. LOAD A (accumulator = A) + + 6. ADD B (accumulator = A+B) + + 7. MUL C (accumulator = (A+B)*C) + + 8. SUB X (accumulator = (A+B)*C - (D+E)*F) + + 9. STORE G (G = (A+B)*C - (D+E)*F) + + This sequence takes 9 instructions. The answer is (D). + + + Q: Consider a computer design in which multiple processors, each with a private + cache memory, share global memory using a single bus. This bus is the critical system + resource. Each processor can execute one instruction every 500 nanoseconds as long + as memory references are satisfied by its local cache. When a cache miss occurs, + the processor is delayed for an additional 2,000 nanoseconds. During half of this + additional delay, the bus is dedicated to serving the cache miss. During the other + half, the processor cannot continue, but the bus is free to service requests from + other processors. On average, each instruction requires 2 memory references. On + average, cache misses occur on 1 percent of references. What proportion of the capacity + of the bus would a single processor consume, ignoring delays due to competition + from other processors? + + (A) 1/50 (B) 1/27 (C) 1/25 (D) 2/27 + + A: Let''s think step by step. We know that each instruction requires two memory + references, and that there is an average cache miss rate of one percent. + + Thus a given processor has: + + (1 cache miss / 100 references) * (2 references / instruction) = + + (2 cache misses / 100 instructions), so: + + misses_per_instruction = 1 cache miss / 50 instructions. + + Next, we know that each instruction requires 500 nanoseconds when there is no cache + miss, and 500 + 2000 = 2500 nanoseconds when there is a cache miss. Thus: + + 50 instructions / ((49 * 500) + (1 * 2500)) nanoseconds, so: + + instructions_per_ns = 50 instructions / 27000 nanoseconds. + + Now, we know that each cache miss locks the bus for half of the 2000 nanosecond + cache miss delay, or 1000 nanoseconds, so: + + lock_ns_per_miss = 1000 nanoseconds / cache miss. + + Thus we can see that on average a single processor will lock the bus for: + + lock_ns_per_miss * misses_per_instruction * instructions_per_ns = + + (1000 nanoseconds / cache miss) * (1 cache miss / 50 instructions) * (50 instructions + / 27000 nanoseconds) = 1000 * (1/50) * (50/27000) = 1000/27000 = 1/27. The answer + is (B).' 
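The bus-utilization arithmetic in the college_computer_science few-shot answer above can be verified exactly with rationals. A minimal Python sketch (illustrative only; variable names are ours, not part of the harness):

from fractions import Fraction

miss_rate = Fraction(1, 100)                      # misses per memory reference
refs_per_instr = 2
misses_per_instr = miss_rate * refs_per_instr     # -> 1/50
avg_ns_per_instr = 500 + misses_per_instr * 2000  # -> 540 ns per instruction
bus_ns_per_instr = misses_per_instr * 1000        # -> 20 ns of bus time
print(bus_ns_per_instr / avg_ns_per_instr)        # -> Fraction(1, 27), answer (B)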
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_college_computer_science diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_mathematics.yaml new file mode 100644 index 00000000..4442f9ed --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_mathematics.yaml @@ -0,0 +1,49 @@ +dataset_name: college_mathematics +description: "The following are multiple choice questions (with answers) about college\ + \ mathematics.\n\nQ: Let V be the set of all real polynomials p(x). Let transformations\ + \ T, S be defined on V by T:p(x) -> xp(x) and S:p(x) -> p'(x) = d/dx p(x), and interpret\ + \ (ST)(p(x)) as S(T(p(x))). Which of the following is true?\n(A) ST = 0 (B) ST =\ + \ T (C) ST = TS (D) ST - TS is the identity map of V onto itself.\nA: Let's think\ + \ step by step. For a given polynomial $p$ we have\n\\[ST(p) = (xp(x))\u2019 = p(x)\ + \ + xp\u2019(x)\\]\nand\n\\[TS(p) = xp\u2019(x).\\]\nHence \\[ST(p) - TS(p) = p(x)\ + \ + xp\u2019(x) - xp\u2019(x) = p(x).\\] The answer is (D).\n\nQ: Suppose that f(1 + x)\ + \ = f(x) for all real x. If f is a polynomial and f(5) = 11, then f(15/2)\n(A) -11\ + \ (B) 0 (C) 11 (D) 33/2\nA: Let's think step by step. The only polynomial such that\ + \ $f(1 + x) = f(x)$ is a constant polynomial. Hence $f(5) = 11 = f(15/2)$. The answer\ + \ is (C).\n\nQ: Let A be a real 2x2 matrix. Which of the following statements must\ + \ be true?\nI. All of the entries of A^2 are nonnegative.\nII. The determinant of\ + \ A^2 is nonnegative.\nIII. If A has two distinct eigenvalues, then A^2 has two\ + \ distinct eigenvalues.\n(A) I only (B) II only (C) III only (D) II and III only\n\ + A: Let's think step by step. We have \\[ det(A^2) = (det(A))^2 \\geq 0,\\] hence\ + \ II holds.\nIII is false: as a counterexample take a diagonal matrix with -1 and\ + \ 1 on the diagonal. Then $A^2$ is the identity matrix. The answer is (B).\n\nQ:\ + \ Let A be the set of all ordered pairs of integers (m, n) such that 7m + 12n =\ + \ 22. What is the greatest negative number in the set B = {m + n : (m, n) \\in A}?\n\ + (A) -5 (B) -4 (C) -3 (D) -2\nA: Let's think step by step. We have 12n = 22 - 7m\ + \ and one of the solutions is $m = -2$, $n = 3$. Then $m + n = 1$, hence we need\ + \ to look for smaller $m$ in order to make $m + n$ negative. The next solution is\ + \ $m = -14$ and $n = 10$. For smaller $m$ we have $m + n$ smaller than $-4$. The\ + \ answer is (B).\n\nQ: A tank initially contains a salt solution of 3 grams of salt\ + \ dissolved in 100 liters of water. A salt solution containing 0.02 grams of salt\ + \ per liter of water is sprayed into the tank at a rate of 4 liters per minute.\ + \ The sprayed solution is continually mixed with the salt solution in the tank,\ + \ and the mixture flows out of the tank at a rate of 4 liters per minute. If the\ + \ mixing is instantaneous, how many grams of salt are in the tank after 100 minutes\ + \ have elapsed?\n(A) 2 (B) 2 - e^-2 (C) 2 + e^-2 (D) 2 + e^-4\nA: Let's think step\ + \ by step. For all $t \\in \\mathbb{R}$, let $s(t)$ denote the number of grams of salt\ + \ in the tank at the $t$ minute mark. Then $s(0) = 3$.\nWe use $s$ and $s(t)$ interchangeably.\ + \ We also use $s^{\\prime}$ and $s^{\\prime}(t)$ interchangeably. The solution sprayed\ + \ into the tank adds $(0.02) 4=2 / 25$ grams of salt per minute. There are always\ + \ 100 liters of liquid in the tank, containing $s$ grams of salt. 
So the density\
+ \ of salt in the tank is $s / 100$ grams per liter. The flow of water out of the\
+ \ tank therefore subtracts $4(s / 100)=s / 25$ grams of salt per minute. Then, for\
+ \ all $t \\in \\mathbb{R}$, we have $s^{\\prime}(t)=(2 / 25)-(s / 25)=(2-s) / 25$,\
+ \ and so $[s(t)=2] \\Rightarrow\\left[s^{\\prime}(t)=0\\right]$. For all $t \\in\
+ \ \\mathbb{R}$,\n$$\n\\frac{d}{d t}[\\ln (s-2)]=\\frac{s^{\\prime}}{s-2}=\\frac{-1}{25}=\\frac{d}{d\
+ \ t}\\left[-\\frac{t}{25}\\right] .\n$$\nChoose $C \\in \\mathbb{R}$ such that,\
+ \ for all $t \\in \\mathbb{R}, \\ln ((s(t)-2))=-[t / 25]+C$. Let $K:=e^{C}$. Then,\
+ \ for all $t \\in \\mathbb{R}$, we have $(s(t))-2=K e^{-t / 25}$, and so $s(t)=2+K\
+ \ e^{-t / 25}$. Then $3=s(0)=2+K e^{0}=2+K$, so $K=1$. Then $s(100)=2+K e^{-100\
+ \ / 25}=2+1 \\cdot e^{-4}=2+e^{-4}$. The answer is (D)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_college_mathematics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_medicine.yaml
new file mode 100644
index 00000000..8f3ae14e
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_medicine.yaml
@@ -0,0 +1,52 @@
+dataset_name: college_medicine
+description: "The following are multiple choice questions (with answers) about college\
+ \ medicine.\n\nQ: An expected side effect of creatine supplementation is:\n(A) muscle\
+ \ weakness. (B) gain in body mass. (C) muscle cramps. (D) loss of electrolytes.\n\
+ A: Let's think step by step. We refer to Wikipedia articles on medicine for help.\
+ \ Creatine is a dietary supplement whose use results in body mass gain.\
+ \ The answer is (B).\n\nQ: Which of the following is not a true statement?\n(A)\
+ \ Muscle glycogen is broken down enzymatically to glucose-1-phosphate (B) Elite\
+ \ endurance runners have a high proportion of Type I fibres in their leg muscles\
+ \ (C) Liver glycogen is important in the maintenance of the blood glucose concentration\
+ \ (D) Insulin promotes glucose uptake by all tissues in the body\nA: Let's think\
+ \ step by step. We refer to Wikipedia articles on medicine for help. Let\u2019s\
+ \ solve this step by step and go over each choice: \n(A) \u201CMuscle glycogen is\
+ \ broken down enzymatically to glucose-1-phosphate\u201D: This is a correct statement.\n\
+ (B) \u201CElite endurance runners have a high proportion of Type I fibres in their\
+ \ leg muscles\u201D: This is a correct statement.\n(C) \u201CLiver glycogen is important\
+ \ in the maintenance of the blood glucose concentration\u201D: This is a correct\
+ \ statement. \n(D) \u201CInsulin promotes glucose uptake by all tissues in the body\u201D\
+ : This is not a correct statement, because insulin promotes glucose uptake by the\
+ \ liver, adipose tissue, and muscle, but not all tissues. For instance, the tissues\
+ \ in the brain and red blood cells are not affected by insulin. The answer is (D).\n\
+ \nQ: A high school science teacher fills a 1 liter bottle with pure nitrogen and\
+ \ seals the lid. The pressure is 1.70 atm, and the room temperature is 25\xB0C.\
+ \ Which two variables will both increase the pressure of the system, if all other\
+ \ variables are held constant?\n(A) Increasing temperature, increasing moles of\
+ \ gas (B) Increasing temperature, increasing volume (C) Decreasing volume, decreasing\
+ \ temperature (D) Decreasing moles of gas, increasing volume\nA: Let's think step\
+ \ by step.
We refer to Wikipedia articles on medicine for help. The relevant equation\ + \ for this is the ideal gas law: PV=nRT. To increase the pressure of the system\ + \ (P), then either n (number of moles of the gas) or T (temperature) have to increase.\ + \ The answer is (A).\n\nQ: In a genetic test of a newborn, a rare genetic disorder\ + \ is found that has X-linked recessive transmission. Which of the following statements\ + \ is likely true regarding the pedigree of this disorder?\n(A) All descendants on\ + \ the maternal side will have the disorder. (B) Females will be approximately twice\ + \ as affected as males in this family. (C) All daughters of an affected male will\ + \ be affected. (D) There will be equal distribution of males and females affected.\n\ + A: Let's think step by step. We refer to Wikipedia articles on medicine for help.\ + \ Let\u2019s solve this step by step. Let's recall first that females have two X\ + \ chromosomes, while males have one X and one Y chromosome. This is an important\ + \ fact we need to know before answering this question. \nBecause a male can only\ + \ pass his only one X chromosome to a daughter, if he is affected by this rare genetic\ + \ disorder, then we know for sure that he will pass this rare genetic disorder to\ + \ all his future-born daughters. Therefore, \u201C(C): All daughters of an affected\ + \ male will be affected\u201D is a correct statement. The answer is (C).\n\nQ: Glucose\ + \ is transported into the muscle cell:\n(A) via protein transporters called GLUT4.\ + \ (B) only in the presence of insulin. (C) via hexokinase. (D) via monocarbylic\ + \ acid transporters.\nA: Let's think step by step. We refer to Wikipedia articles\ + \ on medicine for help. Glucose (also known as the blood sugar) is the main sugar\ + \ found in the human body. It is transported into the muscle cell via diffusion\ + \ through protein transporters called GLUT4. The answer is (A)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_college_medicine diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_physics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_physics.yaml new file mode 100644 index 00000000..d500a5b8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_physics.yaml @@ -0,0 +1,70 @@ +dataset_name: college_physics +description: 'The following are multiple choice questions (with answers) about college + physics. + + + Q: A refracting telescope consists of two converging lenses separated by 100 cm. + The eye-piece lens has a focal length of 20 cm. The angular magnification of the + telescope is + + (A) 4 (B) 5 (C) 6 (D) 20 + + A: Let''s think step by step. In a refracting telescope, if both lenses are converging, + the focus of both lenses must be between the two lenses, and thus the focal lengths + of the two lenses must add up to their separation. Since the focal length of one + lens is 20 cm, the focal length of the other must be 80 cm. The magnification is + the ratio of these two focal lengths, or 4. The answer is (A). + + + Q: The muon decays with a characteristic lifetime of about 10^-6 second into an + electron, a muon neutrino, and an electron antineutrino. The muon is forbidden from + decaying into an electron and just a single neutrino by the law of conservation + of + + (A) charge (B) mass (C) energy and momentum (D) lepton number + + A: Let''s think step by step. Lepton number must be conserved, meaning the total + number of leptons minus the number of antileptons. 
If a muon decays into an electron
+ and a single neutrino, the total lepton number would go from one to two, violating
+ lepton number conservation. The answer is (D).
+
+
+ Q: One end of a Nichrome wire of length 2L and cross-sectional area A is attached
+ to an end of another Nichrome wire of length L and cross- sectional area 2A. If
+ the free end of the longer wire is at an electric potential of 8.0 volts, and the
+ free end of the shorter wire is at an electric potential of 1.0 volt, the potential
+ at the junction of the two wires is most nearly equal to
+
+ (A) 2.4 V (B) 3.3 V (C) 4.5 V (D) 5.7 V
+
+ A: Let''s think step by step. This is a simple voltage divider problem, where the
+ longer wire has a resistance four times that of the shorter wire. So the voltage
+ divider ratio is 1 / 5, meaning that the potential in the middle is 1.0 V + (8.0
+ V - 1.0 V) * 1/5 = 2.4 V. The answer is (A).
+
+
+ Q: A refracting telescope consists of two converging lenses separated by 100 cm.
+ The eye-piece lens has a focal length of 20 cm. The angular magnification of the
+ telescope is
+
+ (A) 4 (B) 5 (C) 6 (D) 20
+
+ A: Let''s think step by step. In a refracting telescope, if both lenses are converging,
+ the focus of both lenses must be between the two lenses, and thus the focal lengths
+ of the two lenses must add up to their separation. Since the focal length of one
+ lens is 20 cm, the focal length of the other must be 80 cm. The magnification is
+ the ratio of these two focal lengths, or 4. The answer is (A).
+
+
+ Q: For which of the following thermodynamic processes is the increase in the internal
+ energy of an ideal gas equal to the heat added to the gas?
+
+ (A) Constant temperature (B) Constant volume (C) Constant pressure (D) Adiabatic
+
+ A: Let''s think step by step. Heat added to the gas can go into the gas's internal
+ energy or work done against an external force. However, if the volume of the gas
+ container is constant, no work will be done (since work is pressure times change
+ in volume). So, at constant volume, all of the heat goes into the internal energy.
+ The answer is (B).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_college_physics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_computer_security.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_computer_security.yaml
new file mode 100644
index 00000000..08f23169
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_computer_security.yaml
@@ -0,0 +1,35 @@
+dataset_name: computer_security
+description: "The following are multiple choice questions (with answers) about computer\
+ \ security.\n\nQ: SHA-1 has a message digest of\n(A) 160 bits (B) 512 bits (C) 628\
+ \ bits (D) 820 bits\nA: Let's think step by step. Since SHA-1 is a hash function\
+ \ which takes an input and produces a 160-bit (20-byte) hash value, its message\
+ \ digest is 160 bits. The answer is (A).\n\nQ: _____________ can modify data on\
+ \ your system \u2013 so that your system doesn\u2019t run correctly or you can no\
+ \ longer access specific data, or it may even ask for ransom in order to give your\
+ \ access.\n(A) IM \u2013 Trojans (B) Backdoor Trojans (C) Trojan-Downloader (D)\
+ \ Ransom Trojan\nA: Let's think step by step. The question describes malware that\
+ \ demands a ransom to restore access, which is a ransom trojan.
The answer is (D).\n\nQ: What\
+ \ is ethical hacking?\n(A) \"Hacking\" ethics so they justify unintended selfish\
+ \ behavior (B) Hacking systems (e.g., during penetration testing) to expose vulnerabilities\
+ \ so they can be fixed, rather than exploited (C) Hacking into systems run by those\
+ \ whose ethics you disagree with (D) A slang term for rapid software development,\
+ \ e.g., as part of hackathons\nA: Let's think step by step. Ethical hacking is a\
+ \ process of detecting vulnerabilities in an application, system, or organization's\
+ \ infrastructure that an attacker can use to exploit an individual or organization.\
+ \ They use this process to prevent cyberattacks and security breaches by lawfully\
+ \ hacking into the systems and looking for weak points. The answer is (B).\n\nQ:\
+ \ The ____________ is anything which your search engine cannot search.\n(A) Haunted\
+ \ web (B) World Wide Web (C) Surface web (D) Deep Web\nA: Let's think step by step.\
+ \ The search engine searches on the Surface Web, which is the portion of the World\
+ \ Wide Web that is visible, so (B) and (C) are wrong. The Haunted Web doesn\u2019t\
+ \ correspond to an internet concept. The Deep Web is the part of the World Wide Web which is\
+ \ not indexed. The answer is (D).\n\nQ: Exploitation of the Heartbleed bug permits\n\
+ (A) overwriting cryptographic keys in memory (B) a kind of code injection (C) a\
+ \ read outside bounds of a buffer (D) a format string attack\nA: Let's think step\
+ \ by step. The Heartbleed Bug is a serious vulnerability in the popular OpenSSL\
+ \ cryptographic software library. Heartbleed resulted from improper input validation\
+ \ (due to a missing bounds check) in the implementation of the TLS heartbeat extension.\
+ \ The vulnerability was classified as a buffer over-read, a situation where more\
+ \ data can be read than should be allowed. The answer is (C)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_computer_security
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_conceptual_physics.yaml
new file mode 100644
index 00000000..df845ce8
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_conceptual_physics.yaml
@@ -0,0 +1,32 @@
+dataset_name: conceptual_physics
+description: "\nThe following are multiple choice questions (with answers) about conceptual\
+ \ physics.\n\nQ: Colors in a soap bubble result from light\n(A) converted to a different\
+ \ frequency (B) deflection (C) interference (D) polarization\nA: Let's think step\
+ \ by step. In a soap bubble film, the light bounces between the two soap-air interfaces\
+ \ many times, interfering with itself constructively or destructively depending\
+ \ on the width of the film. This results in different colors being visible. The\
+ \ answer is (C).\n\nQ: Compared with the mass of a uranium atom undergoing fission,\
+ \ the combined masses of the products after fission are\n(A) less (B) more (C) the\
+ \ same (D) zero\nA: Let's think step by step. Fission releases energy, which comes\
+ \ from the rest mass of its initial nucleus. Thus the mass of the products is less\
+ \ than the mass of the reactant uranium nucleus. The answer is (A).\n\nQ: Things\
+ \ that are equivalent according to the equivalence principle are\n(A) space and\
+ \ time. (B) a traveling twin and a stay-at-home twin. (C) gravity and acceleration.\
+ \ (D) mass and energy.\nA: Let's think step by step.
Einstein\u2019s famous equivalence\ + \ principle states that gravity and acceleration are equivalent. The answer is (C).\n\ + \nQ: Which of these three elements has the most mass per nucleon?\n(A) Hydrogen\ + \ (B) Iron (C) Uranium (D) Same in each\nA: Let's think step by step. Due to nuclear\ + \ binding energy, the mass of an atomic nucleus is less than the sum of individual\ + \ masses of the free constituent protons and neutrons; this is known as the mass\ + \ defect. Hydrogen has no mass defect because it has only a single nucleon, so it\ + \ will have the most mass per nucleon. The answer is (A).\n\nQ: A model airplane\ + \ flies slower when flying into the wind and faster with wind at its back. When\ + \ launched at right angles to the wind a cross wind its groundspeed compared with\ + \ flying in still air is\n(A) the same (B) greater (C) less (D) either greater or\ + \ less depending on wind speed\nA: Let's think step by step. The plane\u2019s speed\ + \ in the direction of the wind is greater than it would be in the absence of wind,\ + \ and its direction orthogonal to the wind is the same as it would be in the absence\ + \ of the wind. The total speed, which is these two components added in quadrature,\ + \ is thus greater than the speed in still air. The answer is (B)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_conceptual_physics diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_econometrics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_econometrics.yaml new file mode 100644 index 00000000..33883f47 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_econometrics.yaml @@ -0,0 +1,63 @@ +dataset_name: econometrics +description: "The following are multiple choice questions (with answers) about econometrics.\n\ + \nQ: Suppose now that a researcher wishes to use information criteria to determine\ + \ the optimal lag length for a VAR. 500 observations are available for the bi-variate\ + \ VAR, and the values of the determinant of the variance-covariance matrix of residuals\ + \ are 0.0336, 0.0169, 0.0084, and 0.0062 for 1, 2, 3, and 4 lags respectively. What\ + \ is the optimal model order according to Akaike's information criterion?\n(A) 1\ + \ lag (B) 2 lags (C) 3 lags (D) 4 lags\nA: Let's think step by step. We refer to\ + \ Wikipedia articles on econometrics for help. Let\u2019s solve this problem step\ + \ by step. First of all, let\u2019s recall that for a given set of data, Akaike's\ + \ information criterion (AIC) allows us to measure how well a statistical model\ + \ fits the data; it is an estimator of prediction error. Here in this problem we\ + \ will need to use the formula ln(det(sigma_hat)) + (2 * k / T) to determine the\ + \ values of Akaike\u2019s criterion, where ln denotes the natural log function,\ + \ det the determinant function, k the total number of parameters in total (across\ + \ both equations), and T the number of observations (which, in this case, is equal\ + \ to 500). For 1 lag, the number of parameters in total is equal to 6; for 2 lags,\ + \ it is 10; for 3 lags, it is 14; and for 4 lags, it is 18. 
Now, let\u2019s calculate\
+ \ the values of the criterion for each lag:\n(A) 1 lag: ln(0.0336) + (2 * 6 / 500)\
+ \ = ln(0.0336) + (12 / 500) = -3.369\n(B) 2 lags: ln(0.0169) + (2 * 10 / 500) =\
+ \ ln(0.0169) + (20 / 500) = -4.040\n(C) 3 lags: ln(0.0084) + (2 * 14 / 500) = ln(0.0084)\
+ \ + (28 / 500) = -4.724\n(D) 4 lags: ln(0.0062) + (2 * 18 / 500) = ln(0.0062) + (36\
+ \ / 500) = -5.011\nBecause the optimal model order according to AIC minimizes the\
+ \ information criterion, the answer should be the one with the lowest value. In\
+ \ this case, (D) has the lowest value. The answer is (D).\n\nQ: Consider the following\
+ \ AR(1) model with the disturbances having zero mean and unit variance\nyt = 0.2\
+ \ + 0.4 yt-1 + ut\nThe (unconditional) mean of y will be given by\n(A) 0.2 (B) 0.4\
+ \ (C) 0.5 (D) 0.33\nA: Let's think step by step. We refer to Wikipedia articles\
+ \ on econometrics for help. Let\u2019s solve this problem step by step. If we have\
+ \ an AR(1) model with the disturbances having zero mean and unit variance, then\
+ \ the unconditional mean of y is equal to the following:\nunconditional mean of\
+ \ y = (the intercept term) / (1 - autoregressive coefficient)\nWe know that the\
+ \ intercept term is 0.2 and the autoregressive coefficient is 0.4; thus, we have:\n\
+ unconditional mean of y = (0.2) / (1 - 0.4) = (0.2) / (0.6) = 2 / 6 = 1 / 3, which\
+ \ is approximately 0.33. That means that the answer should be (D) 0.33. The answer\
+ \ is (D).\n\nQ: What would be the consequences for the OLS estimator if heteroscedasticity\
+ \ is present in a regression model but ignored?\n(A) It will be biased (B) It will\
+ \ be inconsistent (C) It will be inefficient (D) All of (a), (b) and (c) will be\
+ \ true.\nA: Let's think step by step. We refer to Wikipedia articles on econometrics\
+ \ for help. Heteroscedasticity refers to the condition where the variance of the\
+ \ error terms is not constant across multiple observations. If heteroscedasticity\
+ \ is present in a regression model, then the coefficient estimates in the OLS estimator\
+ \ will still be unbiased and consistent, but they will be inefficient. Because (A) and\
+ \ (B) are incorrect choices and (C) is a correct choice, (D) cannot be the right\
+ \ answer. Ultimately, (C) is the only true choice. The answer is (C).\n\nQ: Suppose\
+ \ that a test statistic has associated with it a p-value of 0.08. Which one of the\
+ \ following statements is true?\n(i) If the size of the test were exactly 8%, we\
+ \ would be indifferent between rejecting and not rejecting the null hypothesis\n\
+ (ii) The null would be rejected if a 10% size of test were used\n(iii) The null\
+ \ would not be rejected if a 1% size of test were used\n(iv) The null would be rejected\
+ \ if a 5% size of test were used.\n(A) (ii) and (iv) only (B) (i) and (iii) only\
+ \ (C) (i), (ii), and (iii) only (D) (i), (ii), (iii), and (iv).\nA: Let's think\
+ \ step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019\
+ s reason about each of the options.\n(i) is a true statement.\n(ii) is a true statement.\n\
+ (iii) is a true statement.\n(iv) is not a true statement. Thus, (i), (ii), and (iii)\
+ \ are true. The answer is (C).\n\nQ: For a stationary autoregressive process, shocks\
+ \ will\n(A) Eventually die away (B) Persist indefinitely (C) Grow exponentially\
+ \ (D) Never occur\nA: Let's think step by step. We refer to Wikipedia articles on\
+ \ econometrics for help.
This is a question about stationary processes.\
+ \ For a stationary autoregressive process, shocks will eventually die away. The\
+ \ answer is (A)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_econometrics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_electrical_engineering.yaml
new file mode 100644
index 00000000..cdd31ce4
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_electrical_engineering.yaml
@@ -0,0 +1,34 @@
+dataset_name: electrical_engineering
+description: "\nThe following are multiple choice questions (with answers) about electrical\
+ \ engineering.\n\nQ: A point pole has a strength of 4\u03C0 * 10^-4 weber. The force\
+ \ in newtons on a point pole of 4\u03C0 * 1.5 * 10^-4 weber placed at a distance\
+ \ of 10 cm from it will be\n(A) 15 N. (B) 20 N. (C) 7.5 N. (D) 3.75 N.\nA: Let's\
+ \ think step by step. The force between two point poles is given by m_1m_2/(mu_0\
+ \ 4 \\pi r^2), in analogy to Coulomb\u2019s law. Plugging in the values given in\
+ \ the question, we calculate that the force is approximately 15 N. The answer is\
+ \ (A).\n\nQ: The coil of a moving coil meter has 100 turns, is 40 mm long and 30\
+ \ mm wide. The control torque is 240*10^-6 N-m on full scale. If the magnetic flux density\
+ \ is 1 Wb/m2, the range of the meter is\n(A) 1 mA. (B) 2 mA. (C) 3 mA. (D) 4 mA.\nA: Let's\
+ \ think step by step. The torque on a coil in a uniform magnetic field is given\
+ \ by BANI, where B is the magnetic flux density, A is the area of the coil, N is\
+ \ the number of turns, and I is the current. So we have that I = (Torque)/(BAN),\
+ \ or 240e-6/(1200e-6 * 100 * 1) = 2e-3. The answer is (B).\n\nQ: In an SR latch\
+ \ built from NOR gates, which condition is not allowed\n(A) S=0, R=0 (B) S=0, R=1\
+ \ (C) S=1, R=0 (D) S=1, R=1\nA: Let's think step by step. An SR latch is a set-reset\
+ \ latch; in the case where S=1 and R=1, the circuit has no stable state; instead\
+ \ a race condition will be produced within the circuit, so the device will be in\
+ \ an undefined state. So S=1, R=1 is an illegal input. The answer is (D).\n\nQ:\
+ \ Two long parallel conductors carry 100 A. If the conductors are separated by 20\
+ \ mm, the force per meter of length of each conductor will be\n(A) 100 N. (B) 0.1\
+ \ N. (C) 1 N. (D) 0.01 N.\nA: Let's think step by step. The magnetic force-per-length\
+ \ between two current-carrying conductors is given by \\mu_0 I_1 I_2 / (2 \\pi r),\
+ \ where $r$ is the separation distance and I_1 and I_2 are the currents. Plugging\
+ \ in 100 A for I_1 and I_2, and 20 mm for r, gives 0.1 N. The answer is (B).\n\n\
+ Q: In a 2 pole lap winding dc machine, the resistance of one conductor is 2\u03A9\
+ \ and total number of conductors is 100. Find the total resistance\n(A) 200\u03A9\
+ \ (B) 100\u03A9 (C) 50\u03A9 (D) 10\u03A9\nA: Let's think step by step. In lap winding,\
+ \ effectively two resistors are connected in parallel, so the actual resistance\
+ \ of each pair is 1 Ohm. Since we have 50 pairs, we get a total resistance of 50\
+ \ Ohms. The answer is (C)."
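Two of the electrical-engineering answers above are pure plug-in arithmetic, so a short check script makes the unit handling explicit. This is a minimal sketch, not part of the harness; the names are illustrative and the values are taken from the question text.

```python
import math

# Moving-coil meter: full-scale current I = torque / (B * A * N).
B = 1.0                      # flux density, Wb/m^2
A = 0.040 * 0.030            # coil area: 40 mm x 30 mm, in m^2
N = 100                      # number of turns
torque = 240e-6              # control torque at full scale, N*m
print(torque / (B * A * N))  # 0.002 A = 2 mA -> choice (B)

# Force per metre between two long parallel conductors:
# F/L = mu0 * I1 * I2 / (2 * pi * r)
mu0 = 4 * math.pi * 1e-7     # permeability of free space
I1 = I2 = 100.0              # currents, A
r = 0.020                    # separation, m
print(mu0 * I1 * I2 / (2 * math.pi * r))  # ~0.1 N -> choice (B)
```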
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_electrical_engineering diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_elementary_mathematics.yaml new file mode 100644 index 00000000..a85f799c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_elementary_mathematics.yaml @@ -0,0 +1,40 @@ +dataset_name: elementary_mathematics +description: "The following are multiple choice questions (with answers) about elementary\ + \ mathematics.\n\nQ: Olivia used the rule \"Add 11\" to create the number pattern\ + \ shown below. 10, 21, 32, 43, 54. Which statement about the number pattern is true?\n\ + (A) The 10th number in the pattern will be an even number.\n(B) The number pattern\ + \ will never have two even numbers next to each other.\n(C) The next two numbers\ + \ in the pattern will be an even number then an odd number.\n(D) If the number pattern\ + \ started with an odd number then the pattern would have only odd numbers in it.\n\ + A: Let's think step by step. Choice A is incorrect because every even-numbered term\ + \ in the pattern is odd, and 10 is an even number. Choice B is correct, because\ + \ adding an odd number (in this case 11) to an odd number produces an even number,\ + \ and adding an odd number to an even number produces an odd number. Thus the terms\ + \ in the pattern will alternate between odd and even, so there will never be two\ + \ even numbers next to each other. Choice C is incorrect because the last term in\ + \ the example is even (54), and we know that the terms will alternate between even\ + \ and odd. Choice D is incorrect because the terms in the pattern will alternate\ + \ between odd and even, regardless of the value of the first term. The answer is\ + \ (B).\n\nQ: The population of the city where Michelle was born is 145,826. What\ + \ is the value of the 5 in the number 145,826?\n(A) 5 thousands\n(B) 5 hundreds\n\ + (C) 5 tens\n(D) 5 ones\nA: Let's think step by step. Choice A is correct, because\ + \ there are three digits following the 5, so\nthe 5 is in the thousands place. Thus\ + \ the other choices are incorrect. The answer is (A).\n\nQ: A store sells 107 different\ + \ colors of paint. They have 25 cans of each color in storage. The number of cans\ + \ of paint the store has in storage can be found using the expression below. 107\ + \ \xD7 25. How many cans of paint does the store have in storage?\n(A) 749\n(B)\ + \ 2,675\n(C) 2,945\n(D) 4,250\nA: Let's think step by step. We can calculate 107\ + \ x 25 = (100 x 25) + (7 x 25) = 2500 + 175 = 2675. The answer is (B).\n\nQ: A total\ + \ of 30 players will play basketball at a park. There will be exactly 5 players\ + \ on each team. Which statement correctly explains how to find the number of teams\ + \ needed?\n(A) Add 5 to 30 to find 35 teams.\n(B) Divide 30 by 5 to find 6 teams.\n\ + (C) Multiply 30 and 5 to find 150 teams.\n(D) Subtract 5 from 30 to find 25 teams.\n\ + A: Let's think step by step. We want to find the number of teams. We know that there\ + \ are 5 players/team, and 30 players. Thus to get the number of teams we divide\ + \ players by players/team, so 30 players / 5 players/team = 6 teams. The answer\ + \ is (B).\n\nQ: Which expression is equivalent to 5 x 9?\n(A) (5 x 4) x (6 x 5)\n\ + (B) (5 x 5) + (5 x 4)\n(C) (5 x 5) + (5 x 9)\n(D) (5 x 9) x (6 x 9)\nA: Let's think\ + \ step by step. We know that 9 = (5 + 4), so 5 x 9 = 5 x (5 + 4) = (5 x 5) + (5\ + \ x 4). 
The answer is (B)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_elementary_mathematics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_formal_logic.yaml
new file mode 100644
index 00000000..5de7486c
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_formal_logic.yaml
@@ -0,0 +1,57 @@
+dataset_name: formal_logic
+description: "The following are multiple choice questions (with answers) about formal\
+ \ logic.\n\nQ: Which of the given formulas of PL is the best symbolization of the\
+ \ following sentence?\nTurtles live long lives and are happy creatures, unless they\
+ \ are injured.\n(A) (L \u2022 H) \u2261 I (B) (L \u2022 H) \u2228 I (C) L \u2022\
+ \ (H \u2228 I) (D) L \u2022 (H \u2283 R).\nA: Let's think step by step. We refer\
+ \ to Wikipedia articles on formal logic for help. Let\u2019s solve this step by\
+ \ step. Let \u201CL\u201D denote \u201Cliving long\u201D, H \u201Cbeing happy\u201D\
+ , and \u201CI\u201D \u201Cbeing injured\u201D. Now, consider each choice:\n(A) means\
+ \ (living long AND being happy) is equivalent to (being injured). \n(B) means (living\
+ \ long AND being happy) OR (being injured). \n(C) means (living long) AND (being\
+ \ happy OR being injured). \n(D) means (living long) AND (being happy implies being\
+ \ R), but what R denotes is not clear.\nObviously, (B) is the best symbolization\
+ \ of the original sentence. The answer is (B).\n\nQ: Select the best translation\
+ \ into predicate logic.George borrows Hector's lawnmower. (g: George; h: Hector;\
+ \ l: Hector's lawnmower; Bxyx: x borrows y from z).\n(A) Blgh (B) Bhlg (C) Bglh\
+ \ (D) Bghl\nA: Let's think step by step. We refer to Wikipedia articles on formal\
+ \ logic for help. Let\u2019s solve this step by step. We are told that \u201CBxyx\u201D\
+ \ means \u201Cx borrows y from z\u201D. We can rewrite \u201CGeorge borrows Hector's\
+ \ lawnmower\u201D as \u201CGeorge borrows a lawnmower from Hector\u201D, which can\
+ \ then be translated into predicate logic as \u201CBglh\u201D. The answer \u201C\
+ Bglh\u201D appears in (C); therefore, (C) must be the correct answer. The answer\
+ \ is (C).\n\nQ: \nSelect the best English interpretation of the given arguments\
+ \ in predicate logic.\nDm\n(\u2200x)(Wx \u2283 ~Dx). \n(\u2200x)Wx \u2228 Ag\t/\
+ \ (\u2203x)Ax\n(A) Marina is a dancer. Some weaklings are not dancers. Either everything\
+ \ is a weakling or Georgia plays volleyball. So something plays volleyball. (B)\
+ \ Marina is a dancer. No weakling is a dancer. Everything is either a weakling or\
+ \ plays volleyball. So something plays volleyball. (C) Marina is a dancer. Some\
+ \ weaklings are not dancers. Everything is either a weakling or plays volleyball.\
+ \ So something plays volleyball. (D) Marina is a dancer. No weakling is a dancer.\
+ \ Either everything is a weakling or Georgia plays volleyball. So something plays\
+ \ volleyball.\nA: Let's think step by step. We refer to Wikipedia articles on formal\
+ \ logic for help. Let\u2019s solve this step by step. Let \u201CD\u201D denote \u201C\
+ being a dancer\u201D, \u201Cm\u201D denote \u201CMarina\u201D, \u201Cg\u201D denote\
+ \ \u201CGeorgia\u201D, \u201CW\u201D denote \u201Cweakling\u201D, \u201CA\u201D\
+ \ denote \u201Cplaying volleyball\u201D. Then, we have the following:\n1. Dm \u2192\
+ \ Marina is a dancer.\n2. (\u2200x)(Wx \u2283 ~Dx). \u2192 For all x, if x is a weakling,\
+ \ then x is not a dancer.
In other words, no weakling is a dancer.\n3. (\u2200x)Wx\
+ \ \u2228 Ag\t/ (\u2203x)Ax \u2192 For all x, x is a weakling or Georgia plays volleyball.\
+ \ So there exists an x that plays volleyball. \nOptions (A) and (C) do claim that\
+ \ some weaklings are not dancers, but the second argument strongly states that no\
+ \ weakling is a dancer. Thus, we can eliminate them. Option (B) omits the important\
+ \ detail about Georgia playing volleyball. Option (D) has all the details presented\
+ \ in the arguments and is the best English interpretation of the arguments. The\
+ \ answer is (D).\n\nQ: Select the best translation into predicate logic: No people\
+ \ drive on Mars.\n(A) ~Pd (B) (\u2200x)(Px \u2228 ~Dx) (C) (\u2200x)(Px \u2283 ~Dx)\
+ \ (D) ~Dp\nA: Let's think step by step. We refer to Wikipedia articles on formal\
+ \ logic for help. Let\u2019s solve this step by step. Let \u201CP\u201D denote \u201C\
+ being a person\u201D and \u201CD\u201D denote \u201Cdriving on Mars\u201D. Then let\u2019\
+ s consider each option:\nOption (A): ~Pd \u2192 d is not a person.\nOption (B): (\u2200\
+ x)(Px \u2228 ~Dx) \u2192 For all x, x is a person or x does not drive on Mars.\nOption\
+ \ (C): (\u2200x)(Px \u2283 ~Dx) \u2192 For all x, x being a person implies that x does\
+ \ not drive on Mars.\nOption (D): ~Dp \u2192 p does not drive on Mars.\nOf all these\
+ \ options, Option (C) appears to be the best and most meaningful interpretation\
+ \ of the argument \u201CNo people drive on Mars.\u201D The answer is (C)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_formal_logic
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_global_facts.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_global_facts.yaml
new file mode 100644
index 00000000..d5031b00
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_global_facts.yaml
@@ -0,0 +1,33 @@
+dataset_name: global_facts
+description: "The following are multiple choice questions (with answers) about global\
+ \ facts.\n\nQ: As of 2017, how many of the world\u2019s 1-year-old children today\
+ \ have been vaccinated against some disease? *\n(A) 80% (B) 60% (C) 40% (D) 20%\n\
+ A: Let's think step by step. We refer to Wikipedia articles on global facts for\
+ \ help. According to data published by the World Health Organization, the share\
+ \ of 1-year-old children vaccinated in 2017 exceeds 80%. The answer is (A).\n\n\
+ Q: As of 2019, about what percentage of Americans agree that the state is run for\
+ \ the benefit of all the people?\n(A) 31% (B) 46% (C) 61% (D) 76%\nA: Let's think\
+ \ step by step. We refer to Wikipedia articles on global facts for help. In 2019,\
+ \ about 46% of Americans agree that the state is run for the benefit\
+ \ of all the people. The answer is (B).\n\nQ: As of 2019, about what percentage\
+ \ of Russians say it is very important to have free media in our country without\
+ \ government/state censorship?\n(A) 38% (B) 53% (C) 68% (D) 83%\nA: Let's think\
+ \ step by step. We refer to Wikipedia articles on global facts for help. As of 2019,\
+ \ about 38% of Russians say it is very important to have free media in our country.\
+ \ The answer is (A).\n\nQ: As of 2015, since 1990 forests have ____ in Europe and\
+ \ have ____ in Africa and the Americas.\n(A) increased, increased (B) increased,\
+ \ decreased (C) decreased, increased (D) decreased, decreased\nA: Let's think step\
+ \ by step. We refer to Wikipedia articles on global facts for help.
As of 2015,\ + \ since 1990 forests have increased in Europe and have decreased in Africa and the\ + \ Americas. The answer is (B).\n\nQ: Which of the following pairs of statements\ + \ are both true (as of 2019)?\n(A) People tend to be optimistic about their own\ + \ future and the future of their nation or the world. (B) People tend to be optimistic\ + \ about their own future but pessimistic about the future of their nation or the\ + \ world. (C) People tend to be pessimistic about their own future but optimistic\ + \ about the future of their nation or the world. (D) People tend to be pessimistic\ + \ about their own future and the future of their nation or the world.\nA: Let's\ + \ think step by step. We refer to Wikipedia articles on global facts for help. As\ + \ of 2019, most people tend to be optimistic about their own future but pessimistic\ + \ about the future of their nation or the world. The answer is (B)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_global_facts diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_biology.yaml new file mode 100644 index 00000000..91295fe8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_biology.yaml @@ -0,0 +1,54 @@ +dataset_name: high_school_biology +description: "The following are multiple choice questions (with answers) about high\ + \ school biology.\n\nQ: In animal cells, which of the following represents the most\ + \ likely pathway that a secretory protein takes as it is synthesized in a cell?\n\ + (A) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013\ + rough ER (B) Ribosome\u2013Golgi apparatus\u2013rough ER\u2013secretory vesicle\u2013\ + plasma membrane (C) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory\ + \ vesicle\u2013rough ER (D) Ribosome\u2013rough ER\u2013Golgi apparatus\u2013secretory\ + \ vesicle\u2013plasma membrane\nA: Let's think step by step. Protein synthesis starts\ + \ at the ribosome, so we can eliminate (A) and (C). The ribosome is often in the\ + \ endoplasmic reticulum and moves from there to the Golgi apparatus, where it is\ + \ modified and packaged into a vesicle. The vesicle then floats to the plasma membrane\ + \ and is secreted. The answer is (D).\n\nQ: A mutation in a bacterial enzyme changed\ + \ a previously polar amino acid into a nonpolar amino acid. This amino acid was\ + \ located at a site distant from the enzyme\u2019s active site. How might this mutation\ + \ alter the enzyme\u2019s substrate specificity?\n(A) By changing the enzyme\u2019\ + s pH optimum (B) By changing the enzyme\u2019s location in the cell (C) By changing\ + \ the shape of the protein (D) An amino acid change away from the active site cannot\ + \ alter the enzyme\u2019s substrate specificity.\nA: Let's think step by step. A\ + \ change in an amino acid leads to a change in the primary structure of the protein.\ + \ A change in the primary structure may lead to a change in the secondary and the\ + \ tertiary structure of the protein. A change in the tertiary structure means a\ + \ change in the shape of the protein, so (C) has to be correct. Since the change\ + \ does not affect the active site of the enzyme, we do not expect the activity of\ + \ the enzyme to be affected. 
The answer is (C).\n\nQ: Which of the following is\
+ \ not a way to form recombinant DNA?\n(A) Translation (B) Conjugation (C) Specialized\
+ \ transduction (D) Transformation\nA: Let's think step by step. The introduction\
+ \ of foreign DNA or RNA into bacteria or eukaryotic cells is a common technique\
+ \ in molecular biology and scientific research. There are multiple ways foreign\
+ \ DNA can be introduced into cells including transformation, transduction, conjugation,\
+ \ and transfection. In contrast, (A) is not a way to form DNA: during translation\
+ \ the ribosomes synthesize proteins from RNA. The answer is (A).\n\nQ: Homologous\
+ \ structures are often cited as evidence for the process of natural selection. All\
+ \ of the following are examples of homologous structures EXCEPT\n(A) the wings of\
+ \ a bird and the wings of a bat (B) the flippers of a whale and the arms of a man\
+ \ (C) the pectoral fins of a porpoise and the flippers of a seal (D) the forelegs\
+ \ of an insect and the forelimbs of a dog\nA: Let's think step by step.\
+ \ Homologous structures are similar physical features in organisms that share a common\
+ \ ancestor but have different functions. Comparisons (B) and (C) are clearly\
+ \ homologous because they share a common ancestor and the structures serve different\
+ \ purposes. Bat wings and bird wings are also homologous; while they are both wings,\
+ \ these forelimbs serve different purposes. Insects and dogs are only very distantly\
+ \ related, since one is a vertebrate while the other is an invertebrate, and their forelegs serve\
+ \ the same purpose, so they are not homologous. The answer is (D).\n\nQ: Which of\
+ \ the following is not known to be involved in the control of cell division?\n(A)\
+ \ Cyclins (B) Protein kinases (C) Checkpoints (D) Fibroblast cells\nA: Let's think\
+ \ step by step. Normal cells move through the cell cycle in a regulated way. At\
+ \ the checkpoint stage, they use information about their own internal state and\
+ \ cues from the environment around them to decide whether to proceed with cell division.\
+ \ Cues like these act by changing the activity of core cell cycle regulators inside\
+ \ the cell. The most common regulators are cyclins and cyclin-dependent kinases.\
+ \ Fibroblast cells do not play any role in cell division. The answer is (D)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_biology
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_chemistry.yaml
new file mode 100644
index 00000000..ce2a26cc
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_chemistry.yaml
@@ -0,0 +1,49 @@
+dataset_name: high_school_chemistry
+description: "The following are multiple choice questions (with answers) about high\
+ \ school chemistry.\n\nQ: Which of the following is considered an acid anhydride?\n\
+ (A) HCl (B) H2SO3 (C) SO2 (D) Al(NO3)3\nA: Let's think step by step. An acid anhydride\
+ \ is a compound that is derived by removing water from an acid. The chemical formula\
+ \ for water is H2O, which means that we need to determine which of these options,\
+ \ when combined with H2O, forms an acid. SO2, or Sulfur dioxide, when combined with\
+ \ H2O, makes H2SO3, or sulfurous acid. The answer is (C).\n\nQ: Which of the following\
+ \ is expected to be a polar molecule?\n(A) PCl4F (B) BF3 (C) CO2 (D) Si(CH3)4\n\
+ A: Let's think step by step.
A polar molecule is one that has a slightly positive\
+ \ charge on one end of the molecule and a slightly negative charge on the other\
+ \ end. Boron trifluoride (BF3) has Boron as the center atom and three fluorine atoms\
+ \ attached to it; it is trigonal planar and symmetric, so it is nonpolar. Carbon\
+ \ Dioxide (CO2) has Carbon as the central atom with double bonds to two Oxygen atoms\
+ \ - this is also symmetrical and therefore nonpolar. The same is the case for tetramethyl\
+ \ silane (Si(CH3)4), which is a Silicon atom surrounded by four methyl groups. The\
+ \ structure of PCl4F is that Phosphorus is the central atom, attached to four chlorines\
+ \ and one fluorine atom. This is asymmetrical, and therefore has a net dipole and\
+ \ is expected to be a polar molecule. The answer is (A).\n\nQ: From the solubility\
+ \ rules, which of the following is true?\n(A) All chlorides, bromides, and iodides\
+ \ are soluble (B) All sulfates are soluble (C) All hydroxides are soluble (D) All\
+ \ ammonium-containing compounds are soluble\nA: Let's think step by step. The chlorides,\
+ \ bromides, and iodides of lead, silver, and mercury are not soluble in water. This\
+ \ rules out (A). The sulfates of lead, barium, and calcium are not soluble in water,\
+ \ which rules out (B). The hydroxides of any metal besides sodium, potassium, ammonium,\
+ \ calcium, and barium are insoluble. This rules out (C). Typically ammonium ions\
+ \ indicate a soluble ionic substance. The answer is (D).\n\nQ: A new compound is\
+ \ synthesized and found to be a monoprotic acid with a molar mass of 248 g/mol.\
+ \ When 0.0050 mol of this acid are dissolved in 0.500 L of water, the pH is measured\
+ \ as 3.89. What is the pKa of this acid?\n(A) 3.89 (B) 7.78 (C) 5.78 (D) 2.33\n\
+ A: Let's think step by step. Recall that $[A^{-}] = [H^{+}]$. Here, this is equal to\
+ \ $10^{-3.89}$. Then we have $K_{a} = \\frac{[H^{+}][A^{-}]}{[HA]} = \\frac{10^{-3.89}\
+ \ \\cdot 10^{-3.89}}{10^{-2}}$. The resulting exponent is $-3.89 + (-3.89) - (-2)\
+ \ = -5.78$, therefore $K_a = 10^{-5.78}$. The $pK_a$ is the negative log of $K_a$,\
+ \ which is equal to $5.78$. The answer is (C).\n\nQ: A solution contains 2.00 mole\
+ \ of acetic acid, CH3COOH, and 1.00 mole of calcium acetate, Ca(CH3COO)2. The solution\
+ \ is able to resist the addition of a small amount of strong acid or strong base\
+ \ with only minor changes in the pH of the solution. Larger quantities of strong\
+ \ acid or strong base can cause a significant change in pH. How many moles of nitric\
+ \ acid, HNO3, may be added before the pH begins to change significantly?\n(A) 0.500\
+ \ mole (B) 1.00 mole (C) 2.00 mole (D) 3.00 mole\nA: Let's think step by step. We\
+ \ would like to compute the buffer capacity of this solution. First we write the\
+ \ equation for the ionization of the weak acid, in this case of acetic acid. $CH_{3}COOH\
+ \ (aq) + H_{2}O \\rightarrow H_{3}O^{+} + CH_{3}COO^{-}$. The conjugate base is therefore\
+ \ the acetate ion. The added strong acid, Nitric acid, will react with the conjugate\
+ \ base. Therefore the maximum amount of acid that can be added will be equal to\
+ \ the amount of acetate ion, or 2 moles. The answer is (C)."
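The pKa exemplar above leans on the standard weak-acid approximation; its arithmetic is easy to confirm numerically. A minimal Python sketch under that assumption (the names are illustrative, not part of the harness):

```python
import math

# Weak-acid approximation used in the worked answer:
# Ka ~= [H+][A-] / [HA] ~= [H+]^2 / C for a dilute monoprotic acid.
C = 0.0050 / 0.500        # analytical concentration: 0.010 M
pH = 3.89
h = 10 ** (-pH)           # [H+] = [A-] at equilibrium
Ka = h * h / C
print(-math.log10(Ka))    # 5.78 -> choice (C)
```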
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_high_school_chemistry diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_computer_science.yaml new file mode 100644 index 00000000..16a9f66d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_computer_science.yaml @@ -0,0 +1,70 @@ +dataset_name: high_school_computer_science +description: "The following are multiple choice questions (with answers) about high\ + \ school computer science.\n\nQ: Which of the following is an example of the use\ + \ of a device on the Internet of Things (IoT) ?\n(A) A car alerts a driver that\ + \ it is about to hit an object. (B) A hiker uses a G P S watch to keep track of\ + \ her position. (C) A refrigerator orders milk from an online delivery service when\ + \ the milk in the refrigerator is almost gone. (D) A runner uses a watch with optical\ + \ sensors to monitor his heart rate.\nA: Let's think step by step. The term Internet\ + \ of Things (IoT) refers to common devices which are connected to the internet,\ + \ enabling new functionality. Choice A is incorrect because it does not describe\ + \ an internet connected device. In choice B, the watch is only described as having\ + \ GPS functionality but no internet connectivity. Choice C describes a common device\ + \ (a refrigerator) which has internet connectivity enabling new functionality (online\ + \ ordering). Choice D does not mention internet connectivity for the watch, only\ + \ optical sensors. The answer is (C).\n\nQ: Many Web browsers allow users to open\ + \ anonymous windows. During a browsing session in an anonymous window, the browser\ + \ does not record a browsing history or a list of downloaded files. When the anonymous\ + \ window is exited, cookies created during the session are deleted. Which of the\ + \ following statements about browsing sessions in an anonymous window is true?\n\ + (A) The activities of a user browsing in an anonymous window will not be visible\ + \ to people who monitor the user's network, such as the system administrator. (B)\ + \ Items placed in a Web store's shopping cart for future purchase during the anonymous\ + \ browsing session will not be saved on the user's computer. (C) A user will not\ + \ be able to log in to e-mail or social media accounts during the anonymous browsing\ + \ session. (D) A user browsing in an anonymous window will be protected from viruses\ + \ launched from any web sites visited or files downloaded.\nA: Let's think step\ + \ by step. Choice A is incorrect as it only describes network traffic, which an\ + \ anonymous browser does not change. Choice B is correct as it correctly describes\ + \ how an anonymous browser will prevent saving data on the user\u2019s computer\ + \ after the session is ended. Choice C is incorrect because an anonymous browser\ + \ will not prevent logging in to email or social media accounts. Choice D is incorrect\ + \ because an anonymous browser in itself performs no virus protection. 
The answer\ + \ is (B).\n\nQ: In the program below, the initial value of X is 5 and the initial\ + \ value of Y is 10.\nIF (X < 0){\n DISPLAY (\"Foxtrot\")\n} ELSE {\n IF (X > Y){\n\ + \ DISPLAY (\"Hotel\")\n } ELSE {\n IF (Y > 0){\n DISPLAY (\"November\")\n }\ + \ ELSE {\n DISPLAY (\"Yankee\")\n }\n }\n}\nWhat is displayed as a result of\ + \ running the program?\n(A) Foxtrot (B) Hotel (C) November (D) Yankee\nA: Let's\ + \ think step by step. Because X has the value 5, the first conditional IF (X < 0)\ + \ is false, so we move to the first ELSE clause. Because X is 5 and Y is 10, the\ + \ second conditional IF (X > Y) is false, so we move to the following ELSE clause.\ + \ Since Y is 10, the conditional IF (Y > 0) is true, so the command DISPLAY (\"\ + November\") is executed. The answer is (C).\n\nQ: What is the output of \"abc\"\ + [::-1] in Python 3?\n(A) Error (B) abc (C) cba (D) c\nA: Let's think step by step.\ + \ We know that the slicing operator [::-1] takes all of the elements in the string\ + \ in reverse order, so we reverse the order of the string \"abc\", resulting in\ + \ \"cba\". The answer is (C).\n\nQ: A list of numbers has n elements, indexed from\ + \ 1 to n. The following algorithm is intended to display the number of elements\ + \ in the list that have a value greater than 100. The algorithm uses the variables\ + \ count and position. Steps 3 and 4 are missing.\n Step 1: Set count to 0 and position\ + \ to 1.\n Step 2: If the value of the element at index position is greater than\ + \ 100, increase the value of count by 1.\n Step 3: (missing step)\n Step 4: (missing\ + \ step)\n Step 5: Display the value of count.\nWhich of the following could be used\ + \ to replace steps 3 and 4 so that the algorithm works as intended?\n(A) Step 3:\ + \ Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the\ + \ value of count is greater than 100.\n(B) Step 3: Increase the value of position\ + \ by 1.\n Step 4: Repeat steps 2 and 3 until the value of position is greater than\ + \ n.\n(C) Step 3: Repeat step 2 until the value of count is greater than 100.\n\ + \ Step 4: Increase the value of position by 1.\n(D) Step 3: Repeat step 2 until\ + \ the value of position is greater than n.\n Step 4: Increase the value of count\ + \ by 1.\nA: Let's think step by step. Choice A is incorrect, because its Step 4\ + \ has an incorrect termination condition, stopping when count is greater than 100.\ + \ We need to stop after inspecting all elements in the list. Choice B is correct\ + \ because it correctly increments both count and position, and correctly repeats\ + \ these steps and terminates when all elements in the list have been inspected.\ + \ Choice C is incorrect because it incorrectly increments the variable count until\ + \ its value is greater than 100, regardless of the elements in the list. Choice\ + \ D is incorrect because its step 3 does not increment the value of position, so\ + \ it will repeat forever. The answer is (B)." 
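The nested-IF exemplar above can be transcribed almost verbatim into Python, which also lets us confirm the slicing example from the same prompt. A minimal sketch, with the pseudocode's DISPLAY rendered as print:

```python
# Transcription of the exemplar pseudocode with X = 5 and Y = 10.
x, y = 5, 10
if x < 0:
    print("Foxtrot")
elif x > y:
    print("Hotel")
elif y > 0:
    print("November")    # this branch runs -> choice (C)
else:
    print("Yankee")

# Slicing with a negative step walks the string backwards.
print("abc"[::-1])       # prints 'cba' -> choice (C)
```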
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_high_school_computer_science diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_european_history.yaml new file mode 100644 index 00000000..0e7aafcc --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_european_history.yaml @@ -0,0 +1,168 @@ +dataset_name: high_school_european_history +description: "The following are multiple choice questions (with answers) about high\ + \ school european history.\n\nQ: This question refers to the following information.\n\ + Albeit the king's Majesty justly and rightfully is and ought to be the supreme head\ + \ of the Church of England, and so is recognized by the clergy of this realm in\ + \ their convocations, yet nevertheless, for corroboration and confirmation thereof,\ + \ and for increase of virtue in Christ's religion within this realm of England,\ + \ and to repress and extirpate all errors, heresies, and other enormities and abuses\ + \ heretofore used in the same, be it enacted, by authority of this present Parliament,\ + \ that the king, our sovereign lord, his heirs and successors, kings of this realm,\ + \ shall be taken, accepted, and reputed the only supreme head in earth of the Church\ + \ of England, called Anglicans Ecclesia; and shall have and enjoy, annexed and united\ + \ to the imperial crown of this realm, as well the title and style thereof, as all\ + \ honors, dignities, preeminences, jurisdictions, privileges, authorities, immunities,\ + \ profits, and commodities to the said dignity of the supreme head of the same Church\ + \ belonging and appertaining; and that our said sovereign lord, his heirs and successors,\ + \ kings of this realm, shall have full power and authority from time to time to\ + \ visit, repress, redress, record, order, correct, restrain, and amend all such\ + \ errors, heresies, abuses, offenses, contempts, and enormities, whatsoever they\ + \ be, which by any manner of spiritual authority or jurisdiction ought or may lawfully\ + \ be reformed, repressed, ordered, redressed, corrected, restrained, or amended,\ + \ most to the pleasure of Almighty God, the increase of virtue in Christ's religion,\ + \ and for the conservation of the peace, unity, and tranquility of this realm; any\ + \ usage, foreign land, foreign authority, prescription, or any other thing or things\ + \ to the contrary hereof notwithstanding.\nEnglish Parliament, Act of Supremacy,\ + \ 1534\nFrom the passage, one may infer that the English Parliament wished to argue\ + \ that the Act of Supremacy would\n(A) give the English king a new position of authority\ + \ (B) give the position of head of the Church of England to Henry VIII alone and\ + \ exclude his heirs (C) establish Calvinism as the one true theology in England\ + \ (D) end various forms of corruption plaguing the Church in England\nA: Let's think\ + \ step by step. We refer to Wikipedia articles on european history for help. The\ + \ Act of Supremacy states that it grants authority to the king \"to repress and\ + \ extirpate all errors, heresies, and other enormities and abuses\", referring to\ + \ the corruption in the Church of England. The answer is (D).\n\nQ: This question\ + \ refers to the following information.\nRead the following excerpt.\nThe revolutionary\ + \ seed had penetrated into every country and spread more or less. 
It was greatly\ + \ developed under the r\xE9gime of the military despotism of Bonaparte. His conquests\ + \ displaced a number of laws, institutions, and customs; broke through bonds sacred\ + \ among all nations, strong enough to resist time itself; which is more than can\ + \ be said of certain benefits conferred by these innovators.\nThe monarchs will\ + \ fulfil the duties imposed upon them by Him who, by entrusting them with power,\ + \ has charged them to watch over the maintenance of justice, and the rights of all,\ + \ to avoid the paths of error, and tread firmly in the way of truth. Placed beyond\ + \ the passions which agitate society, it is in days of trial chiefly that they are\ + \ called upon to despoil realities of their false appearances, and to show themselves\ + \ as they are, fathers invested with the authority belonging by right to the heads\ + \ of families, to prove that, in days of mourning, they know how to be just, wise,\ + \ and therefore strong, and that they will not abandon the people whom they ought\ + \ to govern to be the sport of factions, to error and its consequences, which must\ + \ involve the loss of society.\nUnion between the monarchs is the basis of the policy\ + \ which must now be followed to save society from total ruin. . . .\nLet them not\ + \ confound concessions made to parties with the good they ought to do for their\ + \ people, in modifying, according to their recognized needs, such branches of the\ + \ administration as require it.\nLet them be just, but strong; beneficent, but strict.\n\ + Let them maintain religious principles in all their purity, and not allow the faith\ + \ to be attacked and morality interpreted according to the social contract or the\ + \ visions of foolish sectarians.\nLet them suppress Secret Societies; that gangrene\ + \ of society.\n\u2014Klemens von Metternich, Political Confession of Faith, 1820\n\ + Which of the following was the greatest cause of the fears expressed by Metternich\ + \ in the document above?\n(A) The ideas of personal liberty and nationalism conceived\ + \ during the Enlightenment resulted in radical revolutions that could spread throughout\ + \ Europe. (B) The conquest of Europe by Napoleon led to the creation of new factions\ + \ and shifted the European balance of power. (C) The power of monarchs had grown\ + \ to the point where it needed to be checked by other powers within each nation\ + \ or domination of civilians would occur. (D) The rising and falling economic cycle\ + \ of the newly emerging capitalist economy could lead to civilian unrest that must\ + \ be suppressed.\nA: Let's think step by step. We refer to Wikipedia articles on\ + \ european history for help. 
The fears of revolution in early 19th century Europe\
+ \ expressed by Klemens von Metternich, a conservative Austrian statesman, were a\
+ \ direct result of the age of Enlightenment, a period of European history where\
+ \ the absolute power of the monarchy was challenged with ideas of individual liberty\
+ \ and nationalism, leading to the French revolution and its effects all over Europe.\
+ \ The answer is (A).\n\nQ: This question refers to the following information.\n\
+ The excerpts below are from the Navigation Acts of 1651.\n[A]fter the first day\
+ \ of December, one thousand six hundred fifty and one, and from thence forwards,\
+ \ no goods or commodities whatsoever of the growth, production or manufacture of\
+ \ Asia, Africa or America, or of any part thereof; or of any islands belonging to\
+ \ them, or which are described or laid down in the usual maps or cards of those\
+ \ places, as well of the English plantations as others, shall be imported or brought\
+ \ into this Commonwealth of England, or into Ireland, or any other lands, islands,\
+ \ plantations, or territories to this Commonwealth belonging, or in their possession,\
+ \ in any other ship or ships, vessel or vessels whatsoever, but only in such as\
+ \ do truly and without fraud belong only to the people of this Commonwealth, or\
+ \ the plantations thereof, as the proprietors or right owners thereof; and whereof\
+ \ the master and mariners are also of the people of this Commonwealth, under the\
+ \ penalty of the forfeiture and loss of all the goods that shall be imported contrary\
+ \ to this act. . . .\n[N]o goods or commodities of the growth, production, or manufacture\
+ \ of Europe, or of any part thereof, shall after the first day of December, one\
+ \ thousand six hundred fifty and one, be imported or brought into this Commonwealth\
+ \ of England, or any other lands or territories to this Commonwealth belonging,\
+ \ or in their possession, in any ship or ships, vessel or vessels whatsoever, but\
+ \ in such as do truly and without fraud belong only to the people of this Commonwealth,\
+ \ and in no other, except only such foreign ships and vessels as do truly and properly\
+ \ belong to the people of that country or place, of which the said goods are the\
+ \ growth, production or manufacture.\nWhich of the following best describes the\
+ \ outcome of the Navigation Acts of 1651?\n(A) They served as a catalyst for the\
+ \ growth of English shipping and overseas trade, but did little to limit the prospects\
+ \ of the Dutch in the seventeenth century. (B) They brought about almost immediate\
+ \ hardships for the Dutch economy as their dominance of overseas trade quickly ended.\
+ \ (C) They were rescinded during the restoration of the Stuarts as they sought normal\
+ \ diplomatic relations with the Dutch so as not to need Parliament's financial support\
+ \ for war. (D) They led to nearly a century of recurrent war between England and\
+ \ the Netherlands, which would not end until after American independence.\nA: Let's\
+ \ think step by step. We refer to Wikipedia articles on european history for help.\
+ \ The Navigation Acts of 1651 helped English shipping by restricting the ability\
+ \ of ships from other European countries, especially the Dutch, to transport goods\
+ \ from colonies in Asia and Africa into England.
The answer is (A).\n\nQ: This question\ + \ refers to the following information.\nIn Russia there was nothing going on well,\ + \ and [Souvarine] was in despair over the news he had received. His old companions\ + \ were all turning to the politicians; the famous Nihilists who made Europe tremble-sons\ + \ of village priests, of the lower middle class, of tradesmen-could not rise above\ + \ the idea of national liberation, and seemed to believe that the world would be\ + \ delivered-when they had killed their despot&\u2026\n\"Foolery! They'll never get\ + \ out of it with their foolery.\"\nThen, lowering his voice still more, in a few\ + \ bitter words he described his old dream of fraternity. He had renounced his rank\ + \ and his fortune; he had gone among workmen, only in the hope of seeing at last\ + \ the foundation of a new society of labour in common. All the sous in his pockets\ + \ had long gone to the urchins of the settlement; he had been as tender as a brother\ + \ with the colliers, smiling at their suspicion, winning them over by his quiet\ + \ workmanlike ways and his dislike of chattering. But decidedly the fusion had not\ + \ taken place.\nHis voice changed, his eyes grew bright, he fixed them on \xE9tienne,\ + \ directly addressing him:\n\"Now, do you understand that? These hatworkers at Marseilles\ + \ who have won the great lottery prize of a hundred thousand francs have gone off\ + \ at once and invested it, declaring that they are going to live without doing anything!\ + \ Yes, that is your idea, all of you French workmen; you want to unearth a treasure\ + \ in order to devour it alone afterwards in some lazy, selfish corner. You may cry\ + \ out as much as you like against the rich, you haven't got courage enough to give\ + \ back to the poor the money that luck brings you. You will never be worthy of happiness\ + \ as long as you own anything, and your hatred of the bourgeois proceeds solely\ + \ from an angry desire to be bourgeois yourselves in their place.\"\n\xE9mile Zola,\ + \ French writer, Germinal, 1885\nThe passage displays the direct concern for the\ + \ welfare of the working classes that was typically a part of which movement?\n\ + (A) Capitalist (B) Scientific (C) Communist (D) Existentialist\nA: Let's think step\ + \ by step. We refer to Wikipedia articles on european history for help. The modern\ + \ Communist movement aims to establish a classless society based on communal ownership\ + \ and distribution of property and means of production, thereby especially benefiting\ + \ the working classes. The answer is (C).\n\nQ: This question refers to the following\ + \ information.\nThe following excerpt is from a pamphlet.\nYou will do me the justice\ + \ to remember, that I have always strenuously supported the Right of every man to\ + \ his own opinion, however different that opinion might be to mine. He who denies\ + \ to another this right, makes a slave of himself to his present opinion, because\ + \ he precludes himself the right of changing it.\nThe most formidable weapon against\ + \ errors of every kind is Reason. 
I have never used any other, and I trust I never\
+ \ shall.\nThe circumstance that has now taken place in France of the total abolition\
+ \ of the whole national order of priesthood, and of everything appertaining to compulsive\
+ \ systems of religion, and compulsive articles of faith, has not only precipitated\
+ \ my intention, but rendered a work of this kind exceedingly necessary, lest in\
+ \ the general wreck of superstition, of false systems of government, and false theology,\
+ \ we lose sight of morality, of humanity, and of the theology that is true.\nI believe\
+ \ in one God, and no more; and I hope for happiness beyond this life.\nI believe\
+ \ in the equality of man; and I believe that religious duties consist in doing justice,\
+ \ loving mercy, and endeavoring to make our fellow-creatures happy.\nI do not believe\
+ \ in the creed professed by the Jewish church, by the Roman church, by the Greek\
+ \ church, by the Turkish church, by the Protestant church, nor by any church that\
+ \ I know of. My own mind is my own church.\nAll national institutions of churches,\
+ \ whether Jewish, Christian or Turkish, appear to me no other than human inventions,\
+ \ set up to terrify and enslave mankind, and monopolize power and profit.\nI do\
+ \ not mean by this declaration to condemn those who believe otherwise; they have\
+ \ the same right to their belief as I have to mine.\n\u2014Thomas Paine, The Age\
+ \ of Reason, 1794\u20131795\nWhich of the following Enlightenment philosophes designed\
+ \ a system of checks and balances for government to avoid abuses of power?\n(A)\
+ \ Jean Jacques Rousseau (B) Baron Montesquieu (C) Mary Wollstonecraft (D) Adam Smith\n\
+ A: Let's think step by step. We refer to Wikipedia articles on european history\
+ \ for help. Baron Montesquieu was an 18th century French philosopher who wrote extensively\
+ \ against the monopolization of power and advocated for a system of checks and balances\
+ \ in government to prevent the rise of despotism. The answer is (B)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_european_history
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_geography.yaml
new file mode 100644
index 00000000..42f6c040
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_geography.yaml
@@ -0,0 +1,63 @@
+dataset_name: high_school_geography
+description: 'The following are multiple choice questions (with answers) about high
+ school geography.
+
+
+ Q: Which one of the following items is an example of nonmaterial culture?
+
+ (A) Dove soap (B) Dove candy bar (C) Dove symbol (D) A dove (bird).
+
+ A: Let''s think step by step. We refer to Wikipedia articles on geography for help.
+ Nonmaterial culture consists of cultural ideas, beliefs or symbols that are not
+ physical objects. The answer is (C).
+
+
+ Q: During the third stage of the demographic transition model, which of the following
+ is true?
+
+ (A) Birth rates increase and population growth rate is less rapid. (B) Birth rates
+ decline and population growth rate is less rapid. (C) Birth rates increase and population
+ growth rate increases. (D) Birth rates decrease and population growth rate increases.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on geography for help.
+ The demographic transition model describes the five different stages of population
+ growth as a country goes through economic development, where the third stage refers
+ to a period of declining birth rates and lower population growth. The answer is
+ (B).
+
+
+ Q: The practice of hiring a foreign third-party service provider to run an operation
+ is called
+
+ (A) outsourcing. (B) offshoring. (C) maquiladoras. (D) locational interdependence.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on geography for help.
+ "Offshoring" literally means to move or base some of the activities or processes
+ of a company to a foreign country. The answer is (B).
+
+
+ Q: Which of the following statements is NOT accurate regarding the services provided
+ by local governments in the United States?
+
+ (A) Duplication of efforts occurs often. (B) Social problems of the central city
+ spill over into the surrounding residential suburbs. (C) Inefficiency in providing
+ services occurs often. (D) One neighborhood''s efforts to reduce pollution are always
+ supported by neighboring communities.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on geography for help.
+ There may be economic, social or political reasons for two neighboring communities
+ and their local governments not agreeing to pollution reduction efforts initiated
+ by one of them. The answer is (D).
+
+
+ Q: The rate of natural increase of a population is found by subtracting the
+
+ (A) crude death rate from the crude birth rate. (B) crude birth rate from the crude
+ death rate. (C) doubling time from the crude birth rate. (D) fertility rate from
+ the crude death rate.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on geography for help.
+ The difference between the number of births and deaths gives the population increase
+ at any given time. The answer is (A).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_geography
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_government_and_politics.yaml
new file mode 100644
index 00000000..8ec1c5b0
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_government_and_politics.yaml
@@ -0,0 +1,67 @@
+dataset_name: high_school_government_and_politics
+description: 'The following are multiple choice questions (with answers) about high
+ school government and politics.
+
+
+ Q: Which of the following best states an argument made by James Madison in The Federalist
+ number 10?
+
+ (A) Honest politicians can prevent factions from developing. (B) Factions are more
+ likely to occur in large republics than in small ones. (C) The negative effects
+ of factionalism can be reduced by a republican government. (D) Free elections are
+ the people''s best defense against factionalism.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on government and politics
+ for help. In the Federalist number 10, James Madison advocated for a representative
+ republican form of government to guard against factionalism. The answer is (C).
+
+
+ Q: The term "budget deficit" refers to the
+
+ (A) annual increase in federal spending on the military (B) amount of interest on
+ the national debt (C) difference between the initial budget proposals made by the
+ president and Congress (D) amount the government spends in excess of its revenues
+
+ A: Let''s think step by step.
We refer to Wikipedia articles on government and politics
+ for help. When the government spends more than it earns, the difference is the
+ budget deficit. The answer is (D).
+
+
+ Q: Which of the following statements about cabinet departments is FALSE?
+
+ (A) They are established by the legislative branch. (B) Their members often don''t
+ have much influence over presidential decisions. (C) They cannot all be run by leaders
+ who belong to the same political party the president does. (D) Not every federal
+ agency is a cabinet department.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on government and politics
+ for help. There is no law stipulating that some cabinet department leaders have
+ to belong to a political party different from that of the president. The answer
+ is (C).
+
+
+ Q: Which of the following cases established the precedent that a defendant must
+ be informed of the right to remain silent, the right to a lawyer, and protection
+ from self-incrimination?
+
+ (A) Weeks v. United States (B) Betts v. Brady (C) Mapp v. Ohio (D) Miranda v. Arizona
+
+ A: Let''s think step by step. We refer to Wikipedia articles on government and politics
+ for help. In the landmark Miranda v. Arizona in 1966, the US Supreme Court, based
+ on the Fifth and Sixth Amendments of the US Constitution, guaranteed a defendant''s
+ right to an attorney and protection from self-incrimination. The answer is (D).
+
+
+ Q: Uncertainty over the limits to presidential power is caused primarily by the
+ fact that
+
+ (A) the constitutional definition of those powers is broad and unspecific (B) most
+ people agree that the Constitution places too many limits on presidential power
+ (C) the Supreme Court consistently refuses to rule on cases concerning presidential
+ powers (D) constitutional amendments have greatly increased presidential powers
+
+ A: Let''s think step by step. We refer to Wikipedia articles on government and politics
+ for help. The US Constitution is not very specific about the powers of the president,
+ leading to uncertainty over their limits. The answer is (A).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_government_and_politics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_macroeconomics.yaml
new file mode 100644
index 00000000..f47a83e6
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_macroeconomics.yaml
@@ -0,0 +1,64 @@
+dataset_name: high_school_macroeconomics
+description: 'The following are multiple choice questions (with answers) about high
+ school macroeconomics.
+
+
+ Q: Which of the following policies best describes supply-side fiscal policy?
+
+ (A) An increase in the money supply (B) Increased government spending (C) Lower
+ taxes on research and development of new technology (D) Higher taxes on household
+ income
+
+ A: Let''s think step by step. We refer to Wikipedia articles on macroeconomics for
+ help. Supply-side fiscal policy stimulates the economy by encouraging more production
+ of goods and services through reduction in taxes and deregulation. The answer is
+ (C).
+
+
+ Q: The short-run Phillips curve indicates a
+
+ (A) direct relation between unemployment and inflation (B) direct relation between
+ price and quantity demanded (C) inverse relation between price and quantity demanded
+ (D) inverse relation between unemployment and inflation
+
+ A: Let''s think step by step.
We refer to Wikipedia articles on macroeconomics for
+ help. The short-run Phillips curve shows that whenever unemployment decreases below
+ a natural level, inflation starts increasing, and vice-versa. The answer is
+ (D).
+
+
+ Q: Holding all else equal which of the following monetary policies would be used
+ to boost U.S. exports?
+
+ (A) Increasing the discount rate (B) Increasing the reserve ratio (C) Buying government
+ securities (D) Lowering tariffs
+
+ A: Let''s think step by step. We refer to Wikipedia articles on macroeconomics for
+ help. Buying government securities leads to a reduction in demand for US dollars from
+ foreign buyers, thereby making the dollar cheaper and hence making US exports more attractive.
+ The answer is (C).
+
+
+ Q: A federal deficit occurs when
+
+ (A) exports exceed imports. (B) imports exceed exports. (C) federal tax collections
+ exceed spending. (D) federal spending exceeds federal tax revenues.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on macroeconomics for
+ help. A federal deficit occurs when federal spending exceeds federal income, which
+ comes primarily from tax revenues. The answer is (D).
+
+
+ Q: Which of the following is not included in the U.S. GDP?
+
+ (A) The U.S. military opens a new base in a foreign country with 1000 U.S. personnel.
+ (B) Japanese consumers buy thousands of CDs produced in the United States. (C) An
+ American pop singer performs a sold-out concert in Paris. (D) A French theatrical
+ production tours dozens of American cities.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on macroeconomics for
+ help. The economic transactions related to the performance of the American pop-singer
+ in Paris happen entirely outside the U.S. and hence are not included in the GDP
+ numbers. The answer is (C).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_macroeconomics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_mathematics.yaml
new file mode 100644
index 00000000..eb692a09
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_mathematics.yaml
@@ -0,0 +1,36 @@
+dataset_name: high_school_mathematics
+description: "The following are multiple choice questions (with answers) about high\
+ \ school mathematics.\n\nQ: Simplify and write the result with a rational denominator:\
+ \ $$\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}$$\n(A) \\frac{3\\sqrt{3}}{3} (B) \\\
+ frac{1}{3} (C) \\sqrt{3} (D) \\frac{\\sqrt{3}}{3}\nA: Let's think step by step.\
+ \ Factoring $729=3^6$ and combining the roots $\\frac{1}{2}\\frac{1}{3}\\frac{1}{2}=\\\
+ frac{1}{12}$, we get that $\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}=\\left(\\frac{1}{3^6}\\\
+ right)^{\\frac{1}{12}}=\\frac{1}{3^{\\frac{1}{2}}}=\\frac{\\sqrt{3}}{3}$. The answer\
+ \ is (D).\n\nQ: Five thousand dollars compounded annually at an $x\\%$ interest\
+ \ rate takes six years to double. At the same interest rate, how many years will\
+ \ it take $\\$300$ to grow to $\\$9600$?\n(A) 12 (B) 1 (C) 30 (D) 5\nA: Let's think\
+ \ step by step. To go from $\\$300$ to $\\$9600$, the value must go up by a factor\
+ \ of $9600/300=32=2^5$. Since at this interest rate it takes six years for it to\
+ \ double, it will take $5*6=30$ years to grow to $\\$9600$. The answer is (C).\n\
+ \nQ: Ten students take a biology test and receive the following scores: 45, 55,\
+ \ 50, 70, 65, 80, 40, 90, 70, 85.
What is the mean of the students\u2019 test scores?\n\
+ (A) 55 (B) 60 (C) 62 (D) 65\nA: Let's think step by step. There are 10 students\
+ \ and the sum of their scores is $45 + 55 + 50 + 70 + 65 + 80 + 40 + 90 + 70 + 85\
+ \ = 650$, the mean is $650/10=65$. The answer is (D).\n\nQ: The variable $x$ varies\
+ \ directly as the square of $y$, and $y$ varies directly as the cube of $z$. If\
+ \ $x$ equals $-16$ when $z$ equals 2, what is the value of $x$ when $z$ equals $\\\
+ frac{1}{2}$?\n(A) -1 (B) 16 (C) -\\frac{1}{256} (D) \\frac{1}{16}\nA: Let's think\
+ \ step by step. We know that $x \\propto y^2$ and $y \\propto z^3$, so $x = k z^6$\
+ \ for some constant $k$. Plugging in for $x=-16$ and $z=2$, the constant value is\
+ \ $k=\\frac{x}{z^6}=\\frac{-16}{64}=-\\frac{1}{4}$. So, when $z=\\frac{1}{2}$, the\
+ \ value of $x$ is $x=kz^6=-\\frac{1}{4}\\frac{1}{2^6}=-\\frac{1}{256}$. The answer\
+ \ is (C).\n\nQ: Joe was in charge of lights for a dance. The red light blinks every\
+ \ two seconds, the yellow light every three seconds, and the blue light every five\
+ \ seconds. If we include the very beginning and very end of the dance, how many\
+ \ times during a seven minute dance will all the lights come on at the same time?\
+ \ (Assume that all three lights blink simultaneously at the very beginning of the\
+ \ dance.)\n(A) 3 (B) 15 (C) 6 (D) 5\nA: Let's think step by step. The least common\
+ \ multiple of 2, 3 and 5 is 30 seconds, so the lights coincide twice per minute; during\
+ \ a 7 minute dance, all three lights will come on at the same time $2*7+1=15$ times.\
+ \ The answer is (B)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_mathematics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_microeconomics.yaml
new file mode 100644
index 00000000..86c83c82
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_microeconomics.yaml
@@ -0,0 +1,63 @@
+dataset_name: high_school_microeconomics
+description: 'The following are multiple choice questions (with answers) about high
+ school microeconomics.
+
+
+ Q: Which of the following is necessarily a characteristic of oligopoly?
+
+ (A) Free entry into and exit from the market (B) A few large producers (C) One producer
+ of a good with no close substitutes (D) A homogenous product
+
+ A: Let''s think step by step. We refer to Wikipedia articles on microeconomics for
+ help. An oligopoly is a market dominated by just a few large sellers or
+ producers. Such a market typically has high barriers to
+ new entry, and the product may be differentiated. The answer is (B).
+
+
+ Q: If the government subsidizes producers in a perfectly competitive market, then
+
+ (A) the demand for the product will increase (B) the demand for the product will
+ decrease (C) the consumer surplus will increase (D) the consumer surplus will decrease
+
+ A: Let''s think step by step. We refer to Wikipedia articles on microeconomics for
+ help. (A) and (B) are wrong because the demand curve does not change at all. If
+ the government subsidizes producers, the supply will increase, and thus the consumer
+ surplus also increases. The answer is (C).
+
+
+ Q: Which of the following is true of a price floor?
+
+ (A) The price floor shifts the demand curve to the left. (B) An effective floor
+ creates a shortage of the good. (C) The price floor shifts the supply curve of the
+ good to the right.
(D) To be an effective floor, it must be set above the equilibrium
+ price.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on microeconomics for
+ help. A price floor does not shift the demand or supply curve. An effective price floor
+ should be set above the equilibrium price; otherwise the market clears and the floor
+ has no effect. The answer is (D).
+
+
+ Q: The concentration ratio for a monopoly is
+
+ (A) 0 (B) 5 (C) 10 (D) 100
+
+ A: Let''s think step by step. We refer to Wikipedia articles on microeconomics for
+ help. The concentration ratio is calculated as the sum of the market shares of a specific
+ number of the largest companies. Monopoly means one company or entity controls the entire
+ market; therefore, the concentration ratio is 100 percent. The answer is (D).
+
+
+ Q: In a competitive labor market for housepainters, which of the following would
+ increase the demand for housepainters?
+
+ (A) An effective minimum wage imposed on this labor market. (B) An increase in the
+ price of gallons of paint. (C) An increase in the construction of new houses. (D)
+ An increase in the price of mechanical painters so long as the output effect exceeds
+ the substitution effect.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on microeconomics for
+ help. An increase in the construction of new houses means an increased demand for
+ house painting, which increases the demand for housepainters. The answer is (C).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_microeconomics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_physics.yaml
new file mode 100644
index 00000000..f21a183c
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_physics.yaml
@@ -0,0 +1,38 @@
+dataset_name: high_school_physics
+description: "The following are multiple choice questions (with answers) about high\
+ \ school physics.\n\nQ: A microwave oven is connected to an outlet, 120 V, and draws\
+ \ a current of 2 amps. At what rate is energy being used by the microwave oven?\n\
+ (A) 10 W (B) 30 W (C) 60 W (D) 240 W\nA: Let's think step by step. Rate of energy\
+ \ usage is known as power; in a dissipative electrical circuit, power is given\
+ \ by voltage times current. So in our case, the power is 120 V times 2 amps, or\
+ \ 240 W. The answer is (D).\n\nQ: A point charge, Q = +1 mC, is fixed at the origin.\
+ \ How much work is required to move a charge, Q = +8 \xB5C, from the point (0, 4\
+ \ meters) to the point (3 meters, 0)?\n(A) 3.5 J (B) 6.0 J (C) 22.5 J (D) 40 J\n\
+ A: Let's think step by step. To calculate the work required to move a charge from\
+ \ one location to another in a fixed electric field, it is enough to calculate the\
+ \ potential difference between the two locations. Here, the potential energy only depends\
+ \ on the distance between the charges; it\u2019s $k q_1 q_2 / r$, where $k$ is Coulomb\u2019\
+ s constant. Plugging in $q_1 = $ 1 mC, $q_2 = 8 \\mu$ C, $r_1 = 4$ m and $r_2 = 3$ m,\
+ \ the work $k q_1 q_2 (1/r_2 - 1/r_1)$ comes out to 5.992 J, which rounds to 6 J. The answer is (B).\n\nQ: Which of the following\
+ \ conditions will ensure that angular momentum is conserved? I. Conservation of\
+ \ linear momentum II. Zero net external force III. Zero net external torque\n(A)\
+ \ I and II only (B) I and III only (C) II and III only (D) III only\nA: Let's think\
+ \ step by step.
Torque is the rate of change of angular momentum; if there is\
+ \ zero net external torque, angular momentum is conserved. The answer is (D).\n\nQ:\
+ \ A photocell of work function \u03D5 = 2eV is connected to a resistor in series.\
+ \ Light of frequency f = 1 \xD7 10^15 Hz hits a metal plate of the photocell. If\
+ \ the power of the light is P = 100 W, what is the current through the resistor?\n\
+ (A) 2:00 AM (B) 6:00 AM (C) 12:00 AM (D) 24 A\nA: Let's think step by step. The\
+ \ only answer above which has units of current is D, 24 A. The answer is (D).\n\n\
+ Q: A pipe full of air is closed at one end. A standing wave is produced in the pipe,\
+ \ causing the pipe to sound a note. Which of the following is a correct statement\
+ \ about the wave\u2019s properties at the closed end of the pipe?\n(A) The pressure\
+ \ is at a node, but the particle displacement is at an antinode. (B) The pressure\
+ \ is at an antinode, but the particle displacement is at a node. (C) The pressure\
+ \ and the particle displacement are both at nodes. (D) The pressure and the particle\
+ \ displacement are both at antinodes.\nA: Let's think step by step. At the closed\
+ \ end of the pipe, the particles cannot have any net displacement because the pipe\
+ \ closure stops them. So the particle displacement is at a node. This closure also\
+ \ causes the pressure to be maximal, i.e. an antinode. The answer is (B)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_physics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_psychology.yaml
new file mode 100644
index 00000000..706db0ec
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_psychology.yaml
@@ -0,0 +1,72 @@
+dataset_name: high_school_psychology
+description: 'The following are multiple choice questions (with answers) about high
+ school psychology.
+
+
+ Q: Pascale is interested in the processing strategies children use to learn new
+ information. Pascale would best be classified as what type of psychologist?
+
+ (A) sociocultural (B) clinical (C) cognitive (D) behaviorist
+
+ A: Let''s think step by step. We refer to Wikipedia articles on psychology for help.
+ A sociocultural psychologist focuses on the effect of societal factors on people.
+ A clinical psychologist focuses on people with mental issues. A cognitive psychologist
+ focuses on how people think and learn, including processing strategies. A behaviorist
+ focuses more on the effect of environment and experience on people. The answer is (C).
+
+
+ Q: According to Caplan''s model of consultee-centered case consultation, the consultant
+ is primarily interested in
+
+ (A) identifying the causes and solutions of the client''s presenting problems (B)
+ identifying and eliminating the causes of the consultee''s difficulties in handling
+ a problem (C) establishing a hierarchy of authority to enable effective decision
+ making (D) presenting a single, well-defined and unambiguous course of action for
+ the consultant to overcome skills deficits
+
+ A: Let''s think step by step. We refer to Wikipedia articles on psychology for help.
+ Caplan defines two types of consultation. Client-centered case consultation aims
+ to handle the client''s problems, while consultee-centered case consultation aims to
+ identify the reasons for the consultee''s difficulty in handling problems. The answer is (B).
+
+
+ Q: According to the Individuals with Disabilities Education Improvement Act, which
+ of the following must an educational agency do before it changes the educational
+ placement of a student with a disability?
+
+ (A) Give the child a trial period in the new environment (B) Notify the parents
+ in writing (C) Obtain school board approval (D) Obtain parental consent
+
+ A: Let''s think step by step. We refer to Wikipedia articles on psychology for help.
+ When the decision to change the educational placement of a student with a disability
+ is made, the educational agency must first notify the parents in writing.
+ The answer is (B).
+
+
+ Q: While swimming in the ocean, Ivan is frightened by a dark shadow in the water
+ even before he has the chance to identify what the shadow is. The synaptic connections
+ taking place during this incident of fright are best described by which of the following?
+
+ (A) Messages are sent from the thalamus directly to the amygdala. (B) Messages are
+ sent from the thalamus to the "what" and "where" pathways. (C) Messages are sent
+ from the parasympathetic nervous system to the cerebral cortex. (D) Messages are
+ sent from the frontal lobes to the pituitary gland.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on psychology for help.
+ Our neural system has a mechanism that can respond to immediate emotional signals before
+ they reach the thought center. In Ivan''s case, messages travel directly from
+ the thalamus to the amygdala. The answer is (A).
+
+
+ Q: Ani believes that her attitudes and behavior play a central role in what happens
+ to her. Such a belief is likely to be associated with
+
+ (A) a strong superego. (B) low self-esteem. (C) low self-efficacy. (D) an internal
+ locus of control.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on psychology for help.
+ People with an external locus of control believe fate and luck play an important
+ role in their lives, while people with an internal locus of control believe they
+ control their lives. The answer is (D).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_psychology
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_statistics.yaml
new file mode 100644
index 00000000..37e21061
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_statistics.yaml
@@ -0,0 +1,88 @@
+dataset_name: high_school_statistics
+description: 'The following are multiple choice questions (with answers) about high
+ school statistics.
+
+
+ Q: A new smartwatch is manufactured in one part of a factory, then secured for shipping
+ in another, independent part of the factory. The weight of the smartwatch has a
+ mean of 62 grams and a standard deviation of 1.0 grams. The weight of the packaging
+ (box, user''s guide, bubble wrap, etc.) has a mean of 456 grams and a standard deviation
+ of 6 grams. Together, the distribution of the weight of the smartwatch and its packaging
+ would have the following mean and standard deviation:
+
+ (A) Mean 518 grams; standard deviation 7.0 grams (B) Mean 518 grams; standard deviation
+ 3.5 grams (C) Mean 518 grams; standard deviation 6.1 grams (D) Mean 394 grams; standard
+ deviation 6.1 grams
+
+ A: Let''s think step by step.
Since the weight of the watch and the weight of the
+ packaging are independent random variables, the mean and variance of their sum are
+ equal to the sum of their individual means and variances. So the mean is 62 + 456
+ = 518 grams, and the variance is 1.0^2 + 6.0^2 = 37, leading to a standard deviation
+ of 6.1 grams. The answer is (C).
+
+
+ Q: After a frost warning was issued, the owner of a large orange grove asked his
+ workers to spray all his trees with water. The water was supposed to freeze and
+ form a protective covering of ice around the orange blossom. Nevertheless, the owner
+ suspected that some trees suffered considerable damage due to the frost. To estimate
+ the proportion of trees that suffered more than 50 percent damage due to the frost,
+ he took a random sample of 100 trees from his grove. What is the response variable
+ in this experiment?
+
+ (A) The proportion of trees that suffered more than 50 percent damage due to frost.
+ (B) The number of trees affected by the frost. (C) The number of trees sampled from
+ the grove. (D) For each sampled tree, whether it suffered more than 50 percent damage
+ or at most 50 percent damage.
+
+ A: Let''s think step by step. In this experiment, the response variable is what
+ is measured. For each tree, what is measured is whether or not it suffered more
+ than 50 percent damage due to the frost. The answer is (D).
+
+
+ Q: Suppose X and Y are random variables with E(X) = 37, var(X) = 5, E(Y) = 62, and
+ var(Y) = 12. What are the expected value and variance of the random variable X +
+ Y?
+
+ (A) E(X + Y) = 99, var(X + Y) = 8.5 (B) E(X + Y) = 99, var(X + Y) = 13 (C) E(X +
+ Y) = 99, var(X + Y) = 17 (D) There is insufficient information to answer this question.
+
+ A: Let''s think step by step. While the means of sums of random variables add (regardless
+ of whether the variables are independent), to determine the variance of
+ a sum of random variables we need to know not just their individual variances but also
+ the covariance of the two variables, which is not given in this problem. The answer
+ is (D).
+
+
+ Q: Which of the following sets has the smallest standard deviation? Which has the
+ largest?
+
+ I: {1,2,3}
+
+ II: {-10,10}
+
+ III: {100}
+
+ (A) I, II (B) II, III (C) III, I (D) III, II
+
+ A: Let''s think step by step. The variance of distribution I is the expected squared
+ deviation from its mean (which is 2), so the variance is 2/3. The variance of distribution
+ II is 10^2 (because both elements are 10 away from the mean of zero). The variance
+ of distribution III is 0, since it has a single entry. So distribution III has the
+ smallest standard deviation and distribution II has the largest. The answer is (D).
+
+
+ Q: Which of the following is a correct statement about correlation?
+
+ (A) If the slope of the regression line is exactly 1, then the correlation is exactly
+ 1. (B) If the correlation is 0, then the slope of the regression line is undefined.
+ (C) Switching which variable is called x and which is called y changes the sign
+ of the correlation. (D) The correlation r is equal to the slope of the regression
+ line when z-scores for the y-variable are plotted against z-scores for the x-variable.
+
+ A: Let''s think step by step. Statement A is false because the slope of the regression
+ line being exactly 1 can occur even when the two variables are not perfectly correlated.
+ Statement B is false because the regression line for uncorrelated variables has slope
+ zero, which is defined.
Statement C is false because correlation is symmetric in the two random variables.
+ The answer is (D).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_statistics
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_us_history.yaml
new file mode 100644
index 00000000..951666d1
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_us_history.yaml
@@ -0,0 +1,133 @@
+dataset_name: high_school_us_history
+description: "The following are multiple choice questions (with answers) about high\
+ \ school us history.\n\nQ: This question refers to the following information.\n\
+ I come not to urge personal claims, nor to seek individual benefits; I appear as\
+ \ the advocate of those who cannot plead their own cause; I come as the friend of\
+ \ those who are deserted, oppressed, and desolate. In the Providence of God, I am\
+ \ the voice of the maniac whose piercing cries from the dreary dungeons of your\
+ \ jails penetrate not your Halls of Legislation. I am the Hope of the poor crazed\
+ \ beings who pine in the cells, and stalls, and cages, and waste rooms of your poor-houses.\
+ \ I am the Revelation of hundreds of wailing, suffering creatures, hidden in your\
+ \ private dwellings, and in pens and cabins\u2014shut out, cut off from all healing\
+ \ influences, from all mind-restoring cares.\u2026 Could their melancholy histories\
+ \ be spread before you as revealed to my grieved spirit during the last three months,\
+ \ how promptly, how earnestly would you search out the most approved means of relief;\
+ \ how trifling, how insignificant, by comparison, would appear the sacrifices you\
+ \ are asked to make; how would a few dimes and dollars, gathered from each citizen,\
+ \ diminish in value as a possession, compared with the certain benefits and vast\
+ \ good to be secured for the suffering insane...by the consecration and application\
+ \ of a sufficient fund to the construction of a suitable hospital.\u2026\n\u2014\
+ Dorothea Dix, Memorial Soliciting a State Hospital for the Protection and Cure of\
+ \ the Insane,\nSubmitted to the General Assembly of North Carolina, November 1848\n\
+ Dorothea Dix can best be compared to whom?\n(A) Abigail Adams (B) Clara Barton (C)\
+ \ Shirley Temple (D) Hillary Clinton\nA: Let's think step by step. We refer to Wikipedia\
+ \ articles on us history for help. Both Dorothea Dix and Clara Barton were American\
+ \ nurses. The answer is (B).\n\nQ: This question refers to the following information.\n\
+ \"As our late Conduct at the Conestoga Manor and Lancaster have occasioned much\
+ \ Speculation & a great diversity of Sentiments in this and neighboring Governments;\
+ \ some vindicating & others condemning it; some charitably alleviating the Crime,\
+ \ & others maliciously painting it in the most odious & detestable Colours, we think\
+ \ it our duty to lay before the Publick, the whole Matter as it appeared, & still\
+ \ appears, to us. . . .\n\"If these things are not sufficient to prove an unjustifiable\
+ \ Attachment in the Quakers to the Indians Savages, a fixed Resolution to befriend\
+ \ them & an utter insensibility to human Distresses, let us consider a few more\
+ \ recent Facts.
When we found the last Summer that we were likely to get no Assistance\
+ \ from the Government, some Volunteers went out at our own Expense, determined to\
+ \ drive our Enemies from our Borders; & when we came near to the great Island, we\
+ \ understood that a Number of their Warriors had gone out against our Frontiers.\
+ \ Upon this we returned and came up with them and fought with them at the Munfey\
+ \ Hill where we lost some of our Men & killed some of their Warriors & thereby saved\
+ \ our Frontiers from this Story in another Expedition. But no sooner had we destroyed\
+ \ their Provisions on the great Island, & ruined their trade with the good People\
+ \ at Bethlehem, but these very Indians, who were justly suspected of having murdered\
+ \ our Friends in Northampton County, were by the Influence of some Quakers taken\
+ \ under the Protection of the Government to screen them from the Resentments of\
+ \ the Friends and Relations of the Murdered, & to support them thro the Winter.\"\
+ \n\u2014\"Apology of the Paxton Boys\" (pamphlet), 1764 (Note: \"apology\" in this\
+ \ context should be read as an explanation, not an admission of guilt or regret.)\n\
+ The sentiments expressed in the explanation above reflect which of the ongoing tensions\
+ \ during the colonial period of American history?\n(A) Tensions between British\
+ \ policies and the aspirations of North American colonists. (B) Tensions between\
+ \ American Indians allied with the French and those allied with the British. (C)\
+ \ Tensions between freed African Americans and white planters. (D) Tensions between\
+ \ backcountry settlers and elites within colonial America.\nA: Let's think step\
+ \ by step. We refer to Wikipedia articles on us history for help. After the French\
+ \ and Indian War, the Scotch-Irish settlers attacked American Indians. After the\
+ \ attacks on the Conestoga, about 250 Paxton Boys presented their grievances to the\
+ \ Pennsylvania legislature. As mentioned in the information, the Paxton Boys cited\
+ \ resentment toward local elites. The answer is (D).\n\nQ: This question refers to\
+ \ the following information.\nOur leaders talk about stopping aggression from the\
+ \ north, but this was a struggle among groups of Vietnamese until we intervened.\
+ \ We seem bent upon saving the Vietnamese from Ho Chi Minh even if we have to kill\
+ \ them and demolish their country to do it. As the native people survey bombed-out\
+ \ villages, women and children burned by napalm, rice crops destroyed and cities\
+ \ overrun with our military personnel, they are doubtless saying secretly of the\
+ \ Vietcong guerillas and of the American forces, \"A plague on both your houses.\"\
+ \ \u2026 Stop the bombing, north and south, end search and destroy offensive sweeps,\
+ \ and confine our military action to holding operations on the ground. Bombing the\
+ \ north has failed to halt or seriously check the flow of troops to the south and\
+ \ may, in fact, have prompted a much greater war effort by Hanoi.\n\u2014Senator\
+ \ George McGovern, \"The Lessons of Vietnam,\" April 25, 1967\nWhich of the following\
+ \ opinions from the 1960s most directly reflects the perspective of George McGovern's\
+ \ speech?\n(A) Americans must maximize their technological edge in Vietnam. (B)\
+ \ American bombing in Vietnam is step by step leading to progress in the war. (C)\
+ \ American bombing in Vietnam is a failure. (D) America must not give in to defeatism\
+ \ about the war in Vietnam.\nA: Let's think step by step.
We refer to Wikipedia\
+ \ articles on us history for help. \"Stop the bombing\" and \"Bombing the north\
+ \ has failed to halt or seriously check the flow of troops to the south\" indicate\
+ \ that the perspective of George McGovern's speech is that American bombing in Vietnam\
+ \ is a failure. The answer is (C).\n\nQ: This question refers to the following information.\n\
+ \"In the new Code of Laws which I suppose it will be necessary for you to make I\
+ \ desire you would Remember the Ladies, and be more generous and favorable to them\
+ \ than your ancestors. Do not put such unlimited power into the hands of the Husbands.\
+ \ Remember all Men would be tyrants if they could. If particular care and attention\
+ \ is not paid to the Ladies we are determined to foment a Rebellion, and will not\
+ \ hold ourselves bound by any Laws in which we have no voice, or Representation.\"\
+ \nAbigail Adams, in a letter to John Adams, 1776\n\"Special legislation for woman\
+ \ has placed us in a most anomalous position. Women invested with the rights of\
+ \ citizens in one section\u2014voters, jurors, office-holders\u2014crossing an imaginary\
+ \ line, are subjects in the next. In some States, a married woman may hold property\
+ \ and transact business in her own name; in others, her earnings belong to her husband.\
+ \ In some States, a woman may testify against her husband, sue and be sued in the\
+ \ courts; in others, she has no redress in case of damage to person, property, or\
+ \ character. In case of divorce on account of adultery in the husband, the innocent\
+ \ wife is held to possess no right to children or property, unless by special decree\
+ \ of the court. But in no State of the Union has the wife the right to her own person,\
+ \ or to any part of the joint earnings of the co-partnership during the life of\
+ \ her husband. In some States women may enter the law schools and practice in the\
+ \ courts; in others they are forbidden. In some universities girls enjoy equal educational\
+ \ advantages with boys, while many of the proudest institutions in the land deny\
+ \ them admittance, though the sons of China, Japan and Africa are welcomed there.\
+ \ But the privileges already granted in the several States are by no means secure.\"\
+ \nSusan B. Anthony, \"Declaration of Rights for Women,\" July 4, 1876\nThe sentiments\
+ \ expressed in the second excerpt by Susan B. Anthony are most likely in support\
+ \ of\n(A) the Equal Rights Amendment (B) universal suffrage (C) states' rights (D)\
+ \ prohibition\nA: Let's think step by step. We refer to Wikipedia articles on us\
+ \ history for help. The information above mentions that women are in an anomalous\
+ \ position in terms of legislation. In some states, women's earnings do not belong\
+ \ to them, and they cannot testify against their husbands. Anthony believes women\
+ \ should have the same legal rights as men. The answer is (B).\n\nQ: This question refers to the\
+ \ following information.\n\"Society in every state is a blessing, but government\
+ \ even in its best state is but a necessary evil; in its worst state an intolerable\
+ \ one; for when we suffer, or are exposed to the same miseries by a government,\
+ \ which we might expect in a country without government, our calamity is heightened\
+ \ by reflecting that we furnish the means by which we suffer. Government, like dress,\
+ \ is the badge of lost innocence; the palaces of kings are built on the ruins of\
+ \ the bowers of paradise.
For were the impulses of conscience clear, uniform, and\
+ \ irresistibly obeyed, man would need no other lawgiver; but that not being the\
+ \ case, he finds it necessary to surrender up a part of his property to furnish\
+ \ means for the protection of the rest; and this he is induced to do by the same\
+ \ prudence which in every other case advises him out of two evils to choose the\
+ \ least. Wherefore, security being the true design and end of government, it unanswerably\
+ \ follows that whatever form thereof appears most likely to ensure it to us, with\
+ \ the least expense and greatest benefit, is preferable to all others.\"\nThomas\
+ \ Paine, Common Sense, 1776\nWhich of the following \"miseries\" alluded to above\
+ \ were most condemned by Anti-Federalists of the post-Revolutionary era?\n(A) Organized\
+ \ response to Bacon's Rebellion (B) Federal response to Shays's Rebellion (C) Federal\
+ \ response to the Whiskey Rebellion (D) Federal response to Pontiac's Rebellion\n\
+ A: Let's think step by step. We refer to Wikipedia articles on us history for help.\
+ \ Anti-Federalists did not believe in centralized government power, and were suspicious\
+ \ of Washington's military response to the Whiskey Rebellion. Bacon's Rebellion and Pontiac's\
+ \ Rebellion happened before the Revolution, so they can be ruled out. The answer is (C)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_us_history
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_world_history.yaml
new file mode 100644
index 00000000..1cf4bbdb
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_world_history.yaml
@@ -0,0 +1,82 @@
+dataset_name: high_school_world_history
+description: "The following are multiple choice questions (with answers) about high\
+ \ school world history.\n\nQ: This question refers to the following information.\n\
+ \"At least one of the [world's] societies would have to somehow enormously increase\
+ \ its productivity [in order to achieve global hegemony]. That quantum jump would\
+ \ have to be made before the various scientific, technological, agricultural, and\
+ \ industrial revolutions on which our post-quantum-leap world rests. It could only\
+ \ be accomplished by exploiting the ecosystems, mineral resources, and human assets\
+ \ of whole continents outside the lands of the society making the jump. Western\
+ \ Europe did just that by means of its brutality and guns and, more important, by\
+ \ geographical and ecological luck.\"\nCopyright \xA9 2015 Cambridge University\
+ \ Press.\nAlfred Crosby, historian, Ecological Imperialism, 2004\nThe \"quantum\
+ \ jump\" mentioned in the passage most directly contributed to which of the following\
+ \ developments in the period 1450\u20131750 C.E.?\n(A) A breakdown in trade routes\
+ \ through the collapse of the established state structure (B) An increase in the\
+ \ population of the world through more plentiful supplies of food (C) The spread\
+ \ of Chinese and Indian belief systems across the world (D) An increase in social\
+ \ unrest\nA: Let's think step by step. We refer to Wikipedia articles on world history\
+ \ for help. The \"quantum jump\" mentioned in the passage refers to the conquest\
+ \ of the New World and the Columbian Exchange. Choices (A) and (D) did not happen\
+ \ in history. Choice (C) refers to the human assets.
The answer is (B).\n\nQ: This\
+ \ question refers to the following information.\n\"The struggle against neo-colonialism\
+ \ is not aimed at excluding the capital of the developed world from operating in\
+ \ less developed countries. It is aimed at preventing the financial power of the\
+ \ developed countries being used in such a way as to impoverish the less developed.\n\
+ Non-alignment, as practiced by Ghana and many other countries, is based on co-operation\
+ \ with all States whether they be capitalist, socialist or have a mixed economy.\
+ \ Such a policy, therefore, involves foreign investment from capitalist countries,\
+ \ but it must be invested in accordance with a national plan drawn up by the government\
+ \ of the non-aligned State with its own interests in mind. The issue is not what\
+ \ return the foreign investor receives on his investments\u2026The question is one\
+ \ of power. A State in the grip of neo-colonialism is not master of its own destiny.\"\
+ \nKwame Nkrumah, Neo-Colonialism, 1965\nWhich of the following provides the best\
+ \ context for Nkrumah's writings?\n(A) The Industrial Revolution (B) Decolonization\
+ \ (C) Regional Free Trade Associations (D) Autarky\nA: Let's think step by step.\
+ \ We refer to Wikipedia articles on world history for help. The passage expresses\
+ \ the point that the fight against neo-colonialism was in danger and that the\
+ \ newly independent nations like Ghana might be re-colonized via the financial power of\
+ \ the developed countries. The answer is (B).\n\nQ: This question refers to the\
+ \ following information.\n\"Indeed, as both the fatwas of distinguished [scholars]\
+ \ who base their opinion on reason and tradition alike and the consensus of the\
+ \ Sunni community agree that the ancient obligation of extirpation, extermination,\
+ \ and expulsion of evil innovation must be the aim of our exalted aspiration, for\
+ \ \"Religious zeal is a victory for the Faith of God the Beneficent\"; then, in\
+ \ accordance with the words of the Prophet (Peace upon him!) \"Whosoever introduces\
+ \ evil innovation into our order must be expelled\" and \"Whosoever does aught against\
+ \ our order must be expelled,\" action has become necessary and exigent\u2026\"\n\
+ Letter from Ottoman Sultan Selim I to Safavid Shah Ismail I, 1514\nThe letter from\
+ \ Selim I is most clearly an example of which of the following?\n(A) The maintenance\
+ \ of military supremacy at all costs (B) Expanding tensions between religious sects\
+ \ (C) Factors that brought about the collapse of the Ottoman Empire (D) Peacemaking\
+ \ efforts among the Islamic empires\nA: Let's think step by step. We refer to Wikipedia\
+ \ articles on world history for help. The passage is an example of expanding tensions\
+ \ between Selim and Ismail. In the passage, Selim references the fatwas and the\
+ \ consensus of the Sunni community against whosoever introduces evil innovation. The answer\
+ \ is (B).\n\nQ: This question refers to the following information.\n\"The real grievance\
+ \ of the worker is the insecurity of his existence; he is not sure that he will\
+ \ always have work, he is not sure that he will always be healthy, and he foresees\
+ \ that he will one day be old and unfit to work.
If he falls into poverty, even\
+ \ if only through a prolonged illness, he is then completely helpless, left\
+ \ to his own devices, and society does not currently recognize any real obligation\
+ \ towards him beyond the usual help for the poor, even if he has been working all\
+ \ the time ever so faithfully and diligently. The usual help for the poor, however,\
+ \ leaves a lot to be desired, especially in large cities, where it is very much\
+ \ worse than in the country.\"\nOtto von Bismarck, 1884\nOtto von Bismarck likely\
+ \ made this speech in reaction to which of the following issues?\n(A) Social acceptance\
+ \ of child labor (B) Declining life expectancy in Germany (C) Criticisms of German\
+ \ trade tariffs (D) Negative effects attributed to industrial capitalism\nA: Let's\
+ \ think step by step. We refer to Wikipedia articles on world history for help.\
+ \ The passage talks about the grievances of the worker under industrial capitalism.\
+ \ The answer is (D).\n\nQ: This question refers to the following information.\n\
+ He contains all works and desires and all perfumes and all tastes. He enfolds the\
+ \ whole universe and in silence is loving to all. This is the Spirit that is in\
+ \ my heart, this is Brahman. To him I shall come when I go beyond this life, and\
+ \ to him will come he who has faith and doubts not.\n\u2014The Upanishads, India,\
+ \ c. 1000 BCE\nTo which religion does the speaker most likely belong?\n(A) Hinduism\
+ \ (B) Buddhism (C) Shintoism (D) Zoroastrianism\nA: Let's think step by step. We\
+ \ refer to Wikipedia articles on world history for help. Brahman refers to the ultimate\
+ \ reality of all things in the Hindu religion. In contrast, Buddhism does not have\
+ \ the concept of a supreme God. The answer is (A)."
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_high_school_world_history
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_aging.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_aging.yaml
new file mode 100644
index 00000000..9d652132
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_aging.yaml
@@ -0,0 +1,48 @@
+dataset_name: human_aging
+description: 'The following are multiple choice questions (with answers) about human
+ aging.
+
+
+ Q: All other things being equal, which of the following persons is more likely to
+ show osteoporosis?
+
+ (A) An older Hispanic American woman (B) An older African American woman (C) An
+ older Asian American woman (D) An older Native American woman
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human aging for
+ help. Although osteoporosis can occur at any age, the risk is higher for older people.
+ It is most common in Asian and non-Hispanic white women. The answer is (C).
+
+
+ Q: The finding that adults tend to remember events from their adolescence better
+ than from other periods in their lives is referred to as the
+
+ (A) Adolescence advantage (B) Reminiscence bump (C) Memorial memorial (D) Quadratic
+ retrieval spike
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human aging for
+ help. The reminiscence bump is the phenomenon that older adults tend to recollect
+ events from their youth. People usually have a period of childhood amnesia from
+ birth to around age 5, and a reminiscence bump between ages 10 and 30. The answer is
+ (B).
+
+
+ Q: Which element in tobacco smoke is responsible for cancers?
+
+ (A) Nicotine (B) Tar (C) Carbon monoxide (D) Smoke particles
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human aging for
+ help. The benzene, acrylamide and acrylonitrile in tar interact with the lungs and
+ cause DNA mutations in lung cells, leading to cancer. The answer is (B).
+
+
+ Q: When older adults move to a new state after retirement, which of the following
+ is the more likely destination?
+
+ (A) Texas (B) California (C) Hawaii (D) Vermont
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human aging for
+ help. Texas does not have a state income tax, and has a low cost of living compared with the
+ other three options. The answer is (A).'
+include: _mmlu_flan_cot_fewshot_template_yaml
+task: mmlu_flan_cot_fewshot_human_aging
diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_sexuality.yaml
new file mode 100644
index 00000000..6b7a12cc
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_sexuality.yaml
@@ -0,0 +1,61 @@
+dataset_name: human_sexuality
+description: 'The following are multiple choice questions (with answers) about human
+ sexuality.
+
+
+ Q: The most common disorder among men who seek sexual therapy is:
+
+ (A) premature ejaculation (B) inhibited ejaculation (C) erectile disorder (D) ejaculatory
+ disorder
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human sexuality
+ for help. The most common type of arousal disorder for men is erectile dysfunction,
+ meaning that a person is not able to get physical satisfaction from sexual activity
+ although he may be interested in it. The answer is (C).
+
+
+ Q: A woman who knows she has active herpes and untreated syphilis but continues
+ to have sex without informing her partners of her condition has, in psychoanalytic
+ terms:
+
+ (A) a strong ego (B) a weak superego (C) a weak id (D) a strong superego
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human sexuality
+ for help. A person with a weak superego tends to be delinquent, criminal, or antisocial.
+ The actions of the woman, who knows she has an active venereal disease but
+ still has sex with her partners, indicate she may have an antisocial personality. The
+ answer is (B).
+
+
+ Q: The nature of homosexual activities that occur during preadolescence include
+ all but which of the following?
+
+ (A) sexual intercourse (B) circle jerks (C) exhibitionism (D) touching each other''s
+ genitals
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human sexuality
+ for help. Sexual intercourse does not typically occur during preadolescence. The answer is
+ (A).
+
+
+ Q: Women''s ability to have multiple orgasms is primarily due to:
+
+ (A) the fact that they do not have a refractory period. (B) the response of the
+ inner layers of the vagina. (C) having alternating orgasms in different locations.
+ (D) the G-Spot.
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human sexuality
+ for help. The refractory period is the time after orgasm when a person is not able to become
+ aroused or interested in sex again; women do not have one. The answer is (A).
+
+
+ Q: Morning sickness is typically a problem:
+
+ (A) during the first trimester (B) during the second trimester (C) during the third
+ trimester (D) all through the pregnancy
+
+ A: Let''s think step by step. We refer to Wikipedia articles on human sexuality
+ for help.
Morning sickness usually begins by nine weeks after conception, corresponding + to the first trimester. The answer is (A).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_human_sexuality diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_international_law.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_international_law.yaml new file mode 100644 index 00000000..655a39e8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_international_law.yaml @@ -0,0 +1,80 @@ +dataset_name: international_law +description: 'The following are multiple choice questions (with answers) about international + law. + + + Q: How the consent to be bound of a State may be expressed? + + (A) The consent of a State to be bound is expressed only by ratification (B) The + consent of a state to be bound by a treaty may be expressed by signature, ratification, + acceptance, approval or accession (C) The consent of a State to be bound is expressed + by signature (D) The consent of a State to be bound is expressed by whatever means + they choose + + A: Let''s think step by step. We refer to Wikipedia articles on international law + for help. Article 11 of Vienna Convention on the Law of Treaties signed in 1969 + states that "the consent of a State to be bound by a treaty may be expressed by + signature, exchange of instruments constituting a treaty, ratification, acceptance, + approval or accession, or by any other means if so agreed." (B) is the most precise + and accurate answer. The answer is (B). + + + Q: What is the judge ad hoc? + + (A) If a party to a contentious case before the ICJ does not have a national sitting + as judge, it is entitled to nominate someone as a judge solely for that case, with + the title of judge ad hoc (B) Judge ad hoc is the member of the bench of the ICJ + with a casting vote (C) Judge ad hoc is a surrogate judge, in case a judge is disqualified + or passes away (D) Judge ad hoc is the judge that each party will always nominate + in every contentious case + + A: Let''s think step by step. We refer to Wikipedia articles on international law + for help. As "ad hoc" implies, a judge ad hoc is appointed only for a specific case + or period, when a party to a contentious case before the International Court of + Justice does not have a regular national sitting as judge. The answer is (A). + + + Q: When ''consent'' can serve as a circumstance precluding the wrongfulness of a + State conduct? + + (A) Consent can serve as a circumstance precluding the wrongfulness whenever it + is given (B) Consent can never serve as a circumstance precluding wrongfulness (C) + Consent can serve as a circumstance precluding wrongfulness, provided the consent + is valid and to the extent that the conduct remains within the limits of the consent + given (D) Consent can always serve as a circumstance precluding wrongfulness, no + matter which organ of the State gives it + + A: Let''s think step by step. We refer to Wikipedia articles on international law + for help. Valid consent can serve as a circumstance precluding the wrongfulness + of a State conduct if the conduct remains within the limits of that consent, according + to Chapter V of the Responsibility of States for Internationally Wrongful Acts, + 2001, United Nations. The answer is (C). + + + Q: Would a reservation to the definition of torture in the ICCPR be acceptable in + contemporary practice? 
+ + (A) This is an acceptable reservation if the reserving country''s legislation employs + a different definition (B) This is an unacceptable reservation because it contravenes + the object and purpose of the ICCPR (C) This is an unacceptable reservation because + the definition of torture in the ICCPR is consistent with customary international + law (D) This is an acceptable reservation because under general international law + States have the right to enter reservations to treaties + + A: Let''s think step by step. We refer to Wikipedia articles on international law + for help. For it contravenes the object and purpose of the ICCPR, this is an unacceptable + reservation in contemporary practice. The answer is (B). + + + Q: What types of force does Article 2(4) of the UN Charter prohibit? + + (A) Article 2(4) encompasses only armed force (B) Article 2(4) encompasses all types + of force, including sanctions (C) Article 2(4) encompasses all interference in the + domestic affairs of States (D) Article 2(4) encompasses force directed only against + a State''s territorial integrity + + A: Let''s think step by step. We refer to Wikipedia articles on international law + for help. Article 2(4) of the UN Charter prohibits states from using armed forces + in their international relations. The answer is (A).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_international_law diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_jurisprudence.yaml new file mode 100644 index 00000000..7e11f0f7 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_jurisprudence.yaml @@ -0,0 +1,69 @@ +dataset_name: jurisprudence +description: 'The following are multiple choice questions (with answers) about jurisprudence. + + + Q: Iverson Jewelers wrote a letter to Miller, ''We have received an exceptionally + fine self winding Rolox watch which we will sell to you at a very favorable price.'' + + (A) The letter is an offer to sell (B) A valid offer cannot be made by letter. (C) + The letter contains a valid offer which will terminate within a reasonable time. + (D) The letter lacks one of the essential elements of an offer. + + A: Let''s think step by step. We refer to Wikipedia articles on jurisprudence for + help. An offer shows the intent to enter into a mutually-beneficial contract with + specific terms. An offer can be made by a letter. While this letter indicates the + willingness to sell, the lack of specific terms, such as transaction price and offer + expiration date, makes it an incomplete offer. The answer is (D). + + + Q: Functions of the law include all but which of the following? + + (A) maximizing individual freedom (B) providing a basis for compromise (C) keeping + the peace (D) promoting the principles of the free enterprise system + + A: Let''s think step by step. We refer to Wikipedia articles on jurisprudence for + help. Laws are fundamentally about helping resolve disputes between individuals, + and therefore essential for maximizing individual freedom, providing a basis for + compromise, and keeping the peace. The answer is (D). + + + Q: The ________ School of jurisprudence postulates that the law is based on what + is "correct." + + (A) Natural Law (B) Analytical (C) Historical (D) Sociological + + A: Let''s think step by step. We refer to Wikipedia articles on jurisprudence for + help. 
Natural Law School of jurisprudence focuses on the laws of nature, and states + that the law should be based on ethics, morals, and what is "correct". Analytical + deals with the law as it already exists, Historical postulates that the law was + found and not made, and Sociological studies how the law and society impact each + other. The answer is (A). + + + Q: Which word best summarizes Weber''s explanation of the development of formally + rational law? + + (A) Authority. (B) Charisma. (C) Co-operation. (D) Capitalism. + + A: Let''s think step by step. We refer to Wikipedia articles on jurisprudence for + help. Weber explained the development of formal rationality in laws as how the modern + society moved from tradition to rationality, where people decide actions based less + on how they were culturally done and more on expected utilities. How rational individuals + optimize efficiency of accomplishing tasks for higher rewards is a core principle + of Capitalism. The answer is (D). + + + Q: Which position does Rawls claim is the least likely to be adopted by the POP + (people in the original position)? + + (A) The POP would choose equality above liberty. (B) The POP would opt for the ''maximin'' + strategy. (C) The POP would opt for the ''difference principle''. (D) The POP would + reject the ''system of natural liberty.'' + + A: Let''s think step by step. We refer to Wikipedia articles on jurisprudence for + help. The POP would opt for the ''maximin'' strategy, opt for the ''difference principle'', + and reject the ''system of natural liberty'', but the POP would not choose equality + above liberty, since the POP assume both equal and free citizens. The answer is + (A).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_jurisprudence diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_logical_fallacies.yaml new file mode 100644 index 00000000..f6f3c359 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_logical_fallacies.yaml @@ -0,0 +1,71 @@ +dataset_name: logical_fallacies +description: 'The following are multiple choice questions (with answers) about logical + fallacies. + + + Q: When an arguer causes confusion during refutation because of real or feigned + lack of an ability to engage in refutation, that arguer may have committed the fallacy + of + + (A) poor sportsmanship (B) appeal to compassion (C) argument against the person + (D) ignorance of refutation + + A: Let''s think step by step. We refer to Wikipedia articles on logical fallacies + for help. Ignorance of refutation, one of Aristotle''s original list of logical + fallacies in his Organon, is when someone causes confusion in an argument through + real or feigned inability to engage in refutation, in order to win the argument. + The answer is (D). + + + Q: The complex question fallacy consists of + + (A) arguing something is inferior just because it doesn''t do something it was never + intended to do. (B) including more than one claim in the proposition and treating + proof for one claim as proof for all the claims. (C) drawing a conclusion before + examining the evidence, and only considering evidence that supports that conclusion. + (D) asking a question that includes either an unproven assumption or more than one + question, thus making a straightforward yes or no answer meaningless. + + A: Let''s think step by step. We refer to Wikipedia articles on logical fallacies + for help. 
The complex question fallacy is when someone makes a single yes or no + answer to a question meaningless, by including either an unproven assumption or + many questions. The latter is also known as the many questions fallacy. The answer + is (D). + + + Q: Arguing that what is true of the parts must be true of the whole is the fallacy + of... + + (A) Division (B) Composition (C) Appeal to the person (D) Appeal to ignorance + + A: Let''s think step by step. We refer to Wikipedia articles on logical fallacies + for help. Fallacy of composition occurs when someone argues what is true of the + parts must be true of the whole. The answer is (B). + + + Q: Which of the following is true of a valid categorical syllogism? + + (A) The minor premise must deny the antecedent (B) The major premise must affirm + the consequent (C) The middle term must be used in at least one premise in a universal + or unqualified sense (D) All of the above + + A: Let''s think step by step. We refer to Wikipedia articles on logical fallacies + for help. A valid categorical syllogism must satisfy several conditions: (1) the + syllogism must have exactly three terms (2) every term of the syllogism must be + used twice exactly, (3) a term may be used only once in any premise, and (4) the + middle term must be used in at least one premise in a universal or unqualified sense, + etc. Only (C) is true. The answer is (C). + + + Q: If someone attacks the character of an opposing arguer, instead of responding + to that opponent''s arguments, the first person has probably committed which of + the following fallacies? + + (A) tu quoque (B) horse laugh (C) argument against the person (D) ignoratio elenchi + + A: Let''s think step by step. We refer to Wikipedia articles on logical fallacies + for help. The argument against the person fallacy occurs when someone irrelevantly + attacks the character of an opposing arguer, instead of addressing that opponent''s + arguments. The answer is (C).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_logical_fallacies diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_machine_learning.yaml new file mode 100644 index 00000000..1856af53 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_machine_learning.yaml @@ -0,0 +1,59 @@ +dataset_name: machine_learning +description: "The following are multiple choice questions (with answers) about machine\ + \ learning.\n\nQ: Which image data augmentation is most common for natural images?\n\ + (A) random crop and horizontal flip (B) random crop and vertical flip (C) posterization\ + \ (D) dithering\nA: Let's think step by step. Data augmentation is used to increase\ + \ the diversity of images in the training dataset. It is important that natural\ + \ images are kept natural after being augmented. Vertical flips of images are not\ + \ natural, so (B) is false. Posterization makes the image look like a poster and\ + \ and dithering increases color depth. None of these two preserve the natural property.\ + \ The only natural data augmentation technique is (A). The answer is (A).\n\nQ:\ + \ Traditionally, when we have a real-valued input attribute during decision-tree\ + \ learning we consider a binary split according to whether the attribute is above\ + \ or below some threshold. Pat suggests that instead we should just have a multiway\ + \ split with one branch for each of the distinct values of the attribute. 
From the\ + \ list below choose the single biggest problem with Pat\u2019s suggestion:\n(A)\ + \ It is too computationally expensive. (B) It would probably result in a decision\ + \ tree that scores badly on the training set and a testset. (C) It would probably\ + \ result in a decision tree that scores well on the training set but badly on a\ + \ testset. (D) It would probably result in a decision tree that scores well on a\ + \ testset but badly on a training set.\nA: Let's think step by step. Because the\ + \ input is real valued, it is unlikely that the same values appear both at training\ + \ and test time. This means that while such a decision tree could yield good performance\ + \ on the training data, when evaluated on the test data it will perform badly because\ + \ the decision tree won\u2019t know what to do with numbers that did not appear\ + \ in the training data. The answer is (C).\n\nQ: You are reviewing papers for the\ + \ World\u2019s Fanciest Machine Learning Conference, and you see submissions with\ + \ the following claims. Which ones would you consider accepting?\n(A) My method\ + \ achieves a training error lower than all previous methods! (B) My method achieves\ + \ a test error lower than all previous methods! (Footnote: When regularisation parameter\ + \ \u03BB is chosen so as to minimise test error.) (C) My method achieves a test\ + \ error lower than all previous methods! (Footnote: When regularisation parameter\ + \ \u03BB is chosen so as to minimise cross-validaton error.) (D) My method achieves\ + \ a cross-validation error lower than all previous methods! (Footnote: When regularisation\ + \ parameter \u03BB is chosen so as to minimise cross-validaton error.)\nA: Let's\ + \ think step by step. In machine learning, we train with some data and fixed hyperparameters\ + \ and the training error can be arbitrarily low, so (A) can\u2019t be right. Then,\ + \ one compares different hyperparameters by selecting the model with the lowest\ + \ cross-validation error, this means that (B) and (D) are not the right procedure.\ + \ The only relevant number after these is the test error and thus (C) is the right\ + \ answer. The answer is (C).\n\nQ: A 6-sided die is rolled 15 times and the results\ + \ are: side 1 comes up 0 times; side 2: 1 time; side 3: 2 times; side 4: 3 times;\ + \ side 5: 4 times; side 6: 5 times. Based on these results, what is the probability\ + \ of side 3 coming up when using Add-1 Smoothing?\n(A) 2.0/15 (B) 1.0/7 (C) 3.0/16\ + \ (D) 1.0/5\nA: Let's think step by step. Add-1 smoothing adds the value of one\ + \ to the different counts and then normalizes the probabilities accordingly. The\ + \ counts after adding one will be: side 1 comes up 1 time; side 2: 2 times; side\ + \ 3: 3 times; side 4: 4 times; side 5: 5 times; side 6: 6 times. The number of sum\ + \ one die rolls will be 21, so the probability of drawing a three is 3/21 = 1/7.\ + \ The answer is (B).\n\nQ: To achieve an 0/1 loss estimate that is less than 1 percent\ + \ of the true 0/1 loss (with probability 95%), according to Hoeffding's inequality\ + \ the IID test set must have how many examples?\n(A) around 10 examples (B) around\ + \ 100 examples (C) between 100 and 500 examples (D) more than 1000 examples\nA:\ + \ Let's think step by step. 
By the Hoeffding\u2019s inequality, we expect that with\ + \ 95% probability the in-sample and out-of-sample errors differ by epsilon when\ + \ we have N samples if 2 exp(-2 epsilon^2 N)<0.05, this implies that N > -1/(2*epsilon**2)\ + \ log ( 0.05/2 )= log (40)*5000. Since log(40)>1, we have that one needs more than\ + \ 1000 examples. The answer is (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_machine_learning diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_management.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_management.yaml new file mode 100644 index 00000000..db2f9642 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_management.yaml @@ -0,0 +1,54 @@ +dataset_name: management +description: 'The following are multiple choice questions (with answers) about management. + + + Q: How can organisational structures that are characterised by democratic and inclusive + styles of management be described? + + (A) Hierarchical (B) Bureaucratic (C) Flat (D) Functional + + A: Let''s think step by step. We refer to Wikipedia articles on management for help. + Flat organizational structures are characterized by democratic and inclusive styles + of management, and have few (if any) levels of management between the workers and + managers. The answer is (C). + + + Q: Hygiene factors are associated with which writer? + + (A) Frederick Hertzberg (B) D.C. McClelland (C) Abraham Maslow (D) Douglas McGregor + + A: Let''s think step by step. We refer to Wikipedia articles on management for help. + Hygiene factors include compensation, company policies, supervision, interpersonal + relations, and work environments. Hertzberg lists them as factors that cannot motivate + employees but can minimize job dissatisfaction. The answer is (A). + + + Q: What characteristic is not a key feature of the ''open systems'' model of management? + + (A) Morale (B) Innovation (C) Growth resource (D) Adaptation + + A: Let''s think step by step. We refer to Wikipedia articles on management for help. + The key characteristics of an open system in management include innovation, growth + resource, and adaption, but do not include morale. The answer is (A). + + + Q: Which element of the cultural web forms regalia? + + (A) Symbols (B) Rituals and routines (C) Power structures (D) Control systems + + A: Let''s think step by step. We refer to Wikipedia articles on management for help. + The cultural web is a tool for mapping an organization''s culture, where symbols + form the regalia that visually expresses the values that the organization holds + as important. The answer is (A). + + + Q: What are the two main dimensions of the Ohio Studies into leadership? + + (A) Starting position and end position (B) Initial environment and changed environment + (C) Organisational structure and conditioning (D) Initiating structure and considerations + + A: Let''s think step by step. We refer to Wikipedia articles on management for help. + The Ohio State Leadership Studies conducted in the 1940s identified initiating structure + and consideration as the two main dimensions of leader behavior. The answer is (D).' 
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_management diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_marketing.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_marketing.yaml new file mode 100644 index 00000000..5dd683da --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_marketing.yaml @@ -0,0 +1,66 @@ +dataset_name: marketing +description: 'The following are multiple choice questions (with answers) about marketing. + + + Q: Although the content and quality can be as controlled as direct mail, response + rates of this medium are lower because of the lack of a personal address mechanism. + This media format is known as: + + (A) Care lines. (B) Direct mail. (C) Inserts. (D) Door to door. + + A: Let''s think step by step. We refer to Wikipedia articles on marketing for help. + Door to door marketing delivers non-addressed items within all buildings within + a geographic area. While it can control the content and quality as well as direct + mail marketing, its response rate is lower because of the lack of a personal address + mechanism. The answer is (D). + + + Q: In an organization, the group of people tasked with buying decisions is referred + to as the _______________. + + (A) Outsourcing unit. (B) Procurement centre. (C) Chief executive unit. (D) Decision-making + unit. + + A: Let''s think step by step. We refer to Wikipedia articles on marketing for help. + In an organization, the group of the people tasked with buying decision is referred + to as the decision-making unit. The answer is (D). + + + Q: The single group within society that is most vulnerable to reference group influence + is: + + (A) The older consumer who feels somewhat left out of things. (B) The married women, + many of whom feel a need for stability in their lives. (C) New immigrants who really + want to assimilate into their new culture. (D) Children, who base most of their + buying decisions on outside influences. + + A: Let''s think step by step. We refer to Wikipedia articles on marketing for help. + Children, who mostly based their buying decisions on outside influences, are the + single group within society that is more vulnerable to reference group influence. + The answer is (D). + + + Q: Which of the following is an assumption in Maslow''s hierarchy of needs? + + (A) Needs are dependent on culture and also on social class. (B) Lower-level needs + must be at least partially satisfied before higher needs can affect behaviour. (C) + Needs are not prioritized or arranged in any particular order. (D) Satisfied needs + are motivators, and new needs emerge when current needs remain unmet. + + A: Let''s think step by step. We refer to Wikipedia articles on marketing for help. + Maslow''s hierarchy of needs, from the bottom upwards, are physiological (food and + clothing), safety, love and belonging needs, esteem, and self-actualization. Lower-level + needs must be at least partially satisfied before higher ones can affect behavior. + The answer is (B). + + + Q: _____________ is a natural outcome when combining demographic and geographic + variables. + + (A) Geodemographics (B) Product differentiation. (C) ANSOFF matrix. (D) Brand management. + + A: Let''s think step by step. We refer to Wikipedia articles on marketing for help. + Geodemographics is a natural outcome when combining demographic and geographic variables. + The answer is (A).' 
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_marketing diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_medical_genetics.yaml new file mode 100644 index 00000000..ebf699aa --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_medical_genetics.yaml @@ -0,0 +1,61 @@ +dataset_name: medical_genetics +description: 'The following are multiple choice questions (with answers) about medical + genetics. + + + Q: The stage of meiosis in which chromosomes pair and cross over is: + + (A) prophase I (B) metaphase I (C) prophase II (D) metaphase II + + A: Let''s think step by step. We refer to Wikipedia articles on medical genetics + for help. Prophase I is the stage of meiosis where homologous chromosomes pair with + each other and exchange genetic material. The answer is (A). + + + Q: DNA ligase is + + (A) an enzyme that joins fragments in normal DNA replication (B) an enzyme of bacterial + origin which cuts DNA at defined base sequences (C) an enzyme that facilitates transcription + of specific genes (D) an enzyme which limits the level to which a particular nutrient + reaches + + A: Let''s think step by step. We refer to Wikipedia articles on medical genetics + for help. DNA ligase is a type of enzyme (EC 6.5.1.1) responsible for joining DNA + strands together by catalyzing a phosphodiester bond. The answer is (A). + + + Q: Which of the following conditions does not show multifactorial inheritance? + + (A) Pyloric stenosis (B) Schizophrenia (C) Spina bifida (neural tube defects) (D) + Marfan syndrome + + A: Let''s think step by step. We refer to Wikipedia articles on medical genetics + for help. Multifactorial inheritance is when more than a single factor is responsible + for causing a given trait or health problem. Genes cannot be the only factor. Marfan + syndrome, on the other hand, requires only one abnormal copy of the of the Marfan + gene, from one parent, to inherit the trait. The answer is (D). + + + Q: A gene showing codominance + + (A) has both alleles independently expressed in the heterozygote (B) has one allele + dominant to the other (C) has alleles tightly linked on the same chromosome (D) + has alleles expressed at the same time in development + + A: Let''s think step by step. We refer to Wikipedia articles on medical genetics + for help. Codominance, as it relates to genetics, refers to a type of genetic inheritance + where the phenotype of both the parents is easily observed in the offspring. A heterozygote + is an individual having two different alleles of a gene. The answer is (A). + + + Q: Large triplet repeat expansions can be detected by: + + (A) polymerase chain reaction. (B) single strand conformational polymorphism analysis. + (C) Southern blotting. (D) Western blotting. + + A: Let''s think step by step. We refer to Wikipedia articles on medical genetics + for help. A Southern blot is a method in molecular biology for detecting specific + DNA sequences in a sample. Large triplet repeat expansions are usually detected + with this method. The answer is (C).' 
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_medical_genetics diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_miscellaneous.yaml new file mode 100644 index 00000000..a506e940 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_miscellaneous.yaml @@ -0,0 +1,54 @@ +dataset_name: miscellaneous +description: 'The following are multiple choice questions (with answers) about miscellaneous. + + + Q: Which of these songs was a Top 10 hit for the rock band The Police? + + (A) ''Radio Ga-Ga'' (B) ''Ob-la-di Ob-la-da'' (C) ''De Do Do Do De Da Da Da'' (D) + ''In-a-Gadda-Da-Vida'' + + A: Let''s think step by step. We refer to Wikipedia for help. Radio Ga-Ga is by + Queen. Ob-la-di Ob-la-da is by The Beatles. And In-a-Gadda-Da-Vida is by Iron Butterfly. + Leaving ''De Do Do Do De Da Da Da'' as the only song by The Police, and also a Top + 10 hit. The answer is (C). + + + Q: What place is named in the title of the 1979 live album by rock legends Cheap + Trick? + + (A) Budapest (B) Budokan (C) Bhutan (D) Britain + + A: Let''s think step by step. We refer to Wikipedia for help. Nippon Budokan is + an indoor arena in Tokyo, Japan renowned for hosting rock music concerts including + Cheap Trick in 1978. ''Cheap Trick at Budokan'' became the name of their album. + The answer is (B). + + + Q: What is produced during photosynthesis? + + (A) hydrogen (B) nylon (C) oxygen (D) light + + A: Let''s think step by step. We refer to Wikipedia for help. Photosynthesis is + the process in which green plants use the green pigment chlorophyll to synthesize + foods with water and carbon dioxide. Oxygen is the byproduct of this process. The + answer is (C). + + + Q: Who is the shortest man to ever win an NBA slam dunk competition? + + (A) Anthony ''Spud'' Webb (B) Michael ''Air'' Jordan (C) Tyrone ''Muggsy'' Bogues + (D) Julius ''Dr J'' Erving + + A: Let''s think step by step. We refer to Wikipedia for help. In 1986, Spud Webb, + standing only 5''7" became the shortest NBA player in history to win an official + slam dunk contest. The answer is (A). + + + Q: How many axles does a standard automobile have? + + (A) one (B) two (C) four (D) eight + + A: Let''s think step by step. We refer to Wikipedia for help. Most cars have two + axles to rotate the wheels.. The answer is (B).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_miscellaneous diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_disputes.yaml new file mode 100644 index 00000000..fdde3633 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_disputes.yaml @@ -0,0 +1,72 @@ +dataset_name: moral_disputes +description: 'The following are multiple choice questions (with answers) about moral + disputes. + + + Q: Baron admits that the versions of the ticking bomb hypothetical she discusses + are "stunningly stupid," but she claims this is actually evidence of + + (A) the stupidity of most traditional philosophical examples. (B) a general lack + of intelligence among people with advanced degrees. (C) the wrongness of torture. + (D) the readiness on the part of many intelligent people to see torture as the best + solution to deal with terrorism. + + A: Let''s think step by step. We refer to Wikipedia articles on moral disputes for + help. 
The ticking bomb hypothetical poses a problem where many people will die to + an exploding bomb, if the hypothetical terrorist does not disclose how to defuse + it. Baron sees this hypothetical as silly, but its prevalence does suggest intelligent + people, particularly utilitarians, see torture as justifiable to save the lives + in this scenario. The answer is (D). + + + Q: A fertilized ovum is also known as + + (A) a zygote. (B) an embryo. (C) a viability. (D) a blastocyst. + + A: Let''s think step by step. We refer to Wikipedia articles on moral disputes for + help. Once a single sperm penetrates the layers of an egg to form a new cell, that + cell is called a zygote. The answer is (A). + + + Q: Pence compares six different cases of reproduction, from natural twinning to + SCNT. What conclusion does he draw from this comparison? + + (A) SCNT is not a different kind of reproduction because there are no morally relevant + differences between it and other permissible means of reproduction. (B) Because + there is a low risk of harm for natural twinning, there will be a low risk of harm + for SCNT. (C) both A and B (D) neither A nor B + + A: Let''s think step by step. We refer to Wikipedia articles on moral disputes for + help. SCNT, known as ''somatic cell nuclear transfer'', is a technique in which + the nucleus of a somatic (body) cell is transferred to the cytoplasm of an enucleated + egg. This is used in farm animal cloning. Pence may compare this to other processes + of reproduction, and see little moral separation. The answer is (A). + + + Q: Which of the following is an intrinsic good, according to Singer? + + (A) being in the upper class (B) the ability to learn and solve problems (C) being + more beautiful than most of the population (D) being shorter than average + + A: Let''s think step by step. We refer to Wikipedia articles on moral disputes for + help. Bioethicist Peter Singer sees intrinsic value as innate values conferred by + oneself, for oneself. Innanimate objects can be beautiful, short, or have some valuable + criteria, but capabilities are intrinsically good. The answer is (B). + + + Q: According to Metz, what is wrong with consequentialist arguments against capital + punishment based on African values? + + (A) It is unclear as of yet whether or not capital punishment deters harm to the + community. (B) It is unclear as of yet whether or not capital punishment deters + harm to any individuals. (C) Consequentialism is not supported by African values. + (D) Even though consequentialism is supported by African values, no consequentialist + arguments framed in terms of African values have been offered. + + A: Let''s think step by step. We refer to Wikipedia articles on moral disputes for + help. Thaddeus Metz is a humanities research professor focusing on ethical philosophy + in South Africa. Metz has written the death penalty is unjustified as it treats + individuals as incapable of communal relations. It is unclear that capital punishment + is to the benefit of, or a deterrent of harm to the community. The answer is (A).' 
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_moral_disputes diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_scenarios.yaml new file mode 100644 index 00000000..4c82f5d5 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_scenarios.yaml @@ -0,0 +1,66 @@ +dataset_name: moral_scenarios +description: 'The following are multiple choice questions (with answers) about moral + scenarios. + + + Q: For which of these two scenarios does the main character (who uses I/me/my) do + something clearly morally wrong, according to ordinary moral standards in the US + as of 2020? Scenario 1 | I took his wallet after he harassed me at the bar. Scenario + 2 | I made love to my newborn baby boy. + + (A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong + + A: Let''s think step by step. We refer to Wikipedia articles on moral scenarios + for help. Stealing from someone is not considered an ethical response to harrassment. + And having sex with a minor is egregiously unethical. The answer is (A). + + + Q: For which of these two scenarios does the main character (who uses I/me/my) do + something clearly morally wrong, according to ordinary moral standards in the US + as of 2020? Scenario 1 | I asked for a cigarette from the guy outside the store. + Scenario 2 | When the security guard left his post, I pocketed a rare coin. + + (A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong + + A: Let''s think step by step. We refer to Wikipedia articles on moral scenarios + for help. Asking for something is not morally wrong. Stealing is morally wrong. + The answer is (C). + + + Q: For which of these two scenarios does the main character (who uses I/me/my) do + something clearly morally wrong, according to ordinary moral standards in the US + as of 2020? Scenario 1 | During the house''s demolition, I pushed him into a wall. + Scenario 2 | When I choose a babysitter for my twin nieces, I don''t care about + her racial background. + + (A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong + + A: Let''s think step by step. We refer to Wikipedia articles on moral scenarios + for help. Physical violence is morally wrong. Being open to hiring people from different + racial backgrounds is an expected moral stance. The answer is (B). + + + Q: For which of these two scenarios does the main character (who uses I/me/my) do + something clearly morally wrong, according to ordinary moral standards in the US + as of 2020? Scenario 1 | I bought an old pick-up truck for work. Scenario 2 | I + looked up at the gorgeous waterfall and threw a rock at it. + + (A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong + + A: Let''s think step by step. We refer to Wikipedia articles on moral scenarios + for help. Neither of these are wrong as no one is wronged by buing a pick-up truck + or throwing a rock into a stream. The answer is (D). + + + Q: For which of these two scenarios does the main character (who uses I/me/my) do + something clearly morally wrong, according to ordinary moral standards in the US + as of 2020? Scenario 1 | I really loved my newborn baby boy. Scenario 2 | The new + girl is embarrassed to live in a trailer park, so I invite the class to her house. + + (A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong + + A: Let''s think step by step. 
We refer to Wikipedia articles on moral scenarios + for help. Loving someone is not wrong. However, exposing something that someone + is embarrassed about could be considered quite mean. The answer is (C).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_moral_scenarios diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_nutrition.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_nutrition.yaml new file mode 100644 index 00000000..eae79250 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_nutrition.yaml @@ -0,0 +1,72 @@ +dataset_name: nutrition +description: 'The following are multiple choice questions (with answers) about nutrition. + + + Q: What is the first-line drug for patients with type 2 diabetes and obesity, as + of 2020? + + (A) Acarbose (B) Metformin (C) Sulphonylureas (D) Insulin + + A: Let''s think step by step. We refer to Wikipedia articles on nutrition for help. + Metformin (Fortamet, Glumetza, or others) is usually the first medication prescribed + for type 2 diabetes, as well as obesity. It works by lowering glucose production + in the liver and improving the body''s sensitivity to insulin. The answer is (B). + + + Q: Which of the following statements is correct (according to knowledge in 2020)? + + (A) Consumers with phenylketonuria must avoid the consumption of the sweetener aspartame + (B) Consumers with phenylketonuria must avoid the consumption of the sweetener saccharin + (C) Consumers with phenylketonuria must avoid the consumption of the sweetener sucralose + (D) Consumers with phenylketonuria must avoid the consumption of the sweetener acesulfame + K + + A: Let''s think step by step. We refer to Wikipedia articles on nutrition for help. + People with phenylketonuria (PKU) cannot break down the amino acid phenylalanine. + As it builds up in the blood and brain it can lead to brain damage. People with + PKU should avoid foods that are converted to phenylalanine in the body, such as + aspartame. The answer is (A). + + + Q: Which of the following statements about iodine is correct, as of 2020? + + (A) 50% of adults consume iodine at levels below the RNI (B) Dairy products are + a poor source of iodine (C) The iodine content of organic milk is generally lower + that the level in non-organic milk (D) UK dietary reference values recommend an + increase in iodine intake in pregnancy + + A: Let''s think step by step. We refer to Wikipedia articles on nutrition for help. + Organic milk usually has less iodine content than non-organic milk. The answer is + (C). + + + Q: Which of the following is the most plausible explanation for the protective effect + of dietary fibre against cancer of the colon, as of 2020? + + (A) Propionic acid, formed during colonic fibre fermentation inhibits liver fatty + acid synthesis (B) Butyric acid, formed during colonic fibre fermentation stimulates + "silencing" of the SLC5A8 tumour suppressor gene (C) None of these options are correct + (D) Butyric acid, formed during colonic fibre fermentation stimulates anti-oxidant + defences in the colon + + A: Let''s think step by step. We refer to Wikipedia articles on nutrition for help. + Dietary fibre is inversely proportional to the risk of colorectal cancer. This is + presumed because butyric acid (BA) stimulates antioxidants which help protect the + colon from cancerous tumors. The answer is (D). 
+ + + Q: In a cohort study, the risk ratio of developing diabetes was 0.86 when comparing + consumers of tea (the exposed) to those who did not drink tea (the unexposed). Which + one statement is correct (according to knowledge in 2020)? + + (A) The tea drinkers have lower risk of developing diabetes. (B) The tea drinkers + have higher risk of developing diabetes. (C) Based on the information given we cannot + tell if the observed difference in disease risk is the result of chance. (D) The + risk ratio is close to the value one, so there is no difference in disease risk + between the two groups. + + A: Let''s think step by step. We refer to Wikipedia articles on nutrition for help. + The risk ratio is not sufficiently reduced that it could not be explained by random + chance given the studies sample size. The answer is (C).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_nutrition diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_philosophy.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_philosophy.yaml new file mode 100644 index 00000000..60ce6c54 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_philosophy.yaml @@ -0,0 +1,30 @@ +dataset_name: philosophy +description: "The following are multiple choice questions (with answers) about philosophy.\n\ + \nQ: The study of reality in the broadest sense, an inquiry into the elemental nature\ + \ of the universe and the things in it, is known as _____.\n(A) metaphysics (B)\ + \ epistemology (C) quantum physics (D) axiology\nA: Let's think step by step. We\ + \ refer to Wikipedia articles on philosophy for help. Among the options, only metaphysics\ + \ studies the nature of reality and existence. The answer is (A).\n\nQ: According\ + \ to Moore\u2019s \u201Cideal utilitarianism,\u201D the right action is the one\ + \ that brings about the greatest amount of:\n(A) pleasure. (B) happiness. (C) good.\ + \ (D) virtue.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy\ + \ for help. Moore's \"ideal utilitarianism\" states that one's actions should maximize\ + \ intrinsic goods. The answer is (C).\n\nQ: Before Tolstoy's Christian conversion,\ + \ what was his perspective on the meaning of life?\n(A) optimist (B) satisfied (C)\ + \ nominally religious (D) pessimist\nA: Let's think step by step. We refer to Wikipedia\ + \ articles on philosophy for help. Before his conversion, Tolstoy feels that life\ + \ was uncertain, which is a pessimist's point of view. The answer is (D).\n\nQ:\ + \ According to d'Holbach, people always act according to _____.\n(A) free choices\ + \ (B) dictates of the soul (C) necessary natural laws (D) undetermined will\nA:\ + \ Let's think step by step. We refer to Wikipedia articles on philosophy for help.\ + \ d'Holbach believes that people act according to necessary laws, and it proves\ + \ nothing about people's free will. The answer is (C).\n\nQ: Psychological egoism\ + \ is:\n(A) an ethical theory about how we ought to behave. (B) a generalization\ + \ concerning the way people tend to behave. (C) a claim about human nature and the\ + \ ways people are capable of behaving. (D) none of the above.\nA: Let's think step\ + \ by step. We refer to Wikipedia articles on philosophy for help. Psychological\ + \ egoism suggests that one behaves based on what makes one feels good, hence it\ + \ is a claim about human nature and how humans are capable of behaving. The answer\ + \ is (C)." 
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_philosophy diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_prehistory.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_prehistory.yaml new file mode 100644 index 00000000..e1c8dcc6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_prehistory.yaml @@ -0,0 +1,67 @@ +dataset_name: prehistory +description: 'The following are multiple choice questions (with answers) about prehistory. + + + Q: What is the approximate mean cranial capacity of Homo erectus? + + (A) under 650 cc (B) about 800 cc (C) just under 1000 cc (D) 1200 cc + + A: Let''s think step by step. We refer to Wikipedia articles on prehistory for help. + The average cranium capacity of Homo erectus is less than 1000 cubic cm. The answer + is (C). + + + Q: According to Timothy Pauketat, the evidence for social stratification and political + power at Cahokia suggests: + + (A) a center of Mississippian civilization with conditions similar to the rise of + early states. (B) the limitations of authority in a Native American society of egalitarian + foragers. (C) a simple chiefdom or perhaps a complex chiefdom had evolved by A.D. + 1500. (D) a center of Mississippian civilization with conditions similar to societies + on the Northwest Coast of North America. + + A: Let''s think step by step. We refer to Wikipedia articles on prehistory for help. + Timothy Pauketat is known for his research on Cahokia, the center of the Mississippian + culture, where he found similar conditions to the rise of early states. The answer + is (A). + + + Q: Recent research on hominid species dating from the Middle Pliocene indicates + there was (as of 2020): + + (A) a great amount of species diversity, or a single species that exhibited a lot + of diversity. (B) very little species diversity during this period and very few + hominids. (C) decreased species diversity due to a prolonged ice age followed by + a severe drought. (D) decreased species diversity but increased numbers of hammerstones + and flakes, indicating stone tool manufacture. + + A: Let''s think step by step. We refer to Wikipedia articles on prehistory for help. + Recent research has recognized multiple hominid species from the Middle Pliocene, + meaning that there is a great amount of species diversity or diversity in a single + species. The answer is (A). + + + Q: Researchers now believe that the decline of the Maya was caused chiefly by: + + (A) a cataclysm of some kind, such as an earthquake, volcano, or tsunami. (B) ecological + degradation resulting from slash-and-burn farming techniques. (C) endless wars between + neighboring Mayan city-states. (D) practices of interbreeding that led to a steep + rise in congenital disorders. + + A: Let''s think step by step. We refer to Wikipedia articles on prehistory for help. + Researchers believe that the Maya collapse was mainly caused by over-exploitation + of natural resources like the slash-and-burn farming techniques. The answer is (B). + + + Q: The great Mayan king Pacal built temples in the city of Palenque in order to: + + (A) satisfy the powerful Mayan astronomer priests. (B) display his generosity to + the common people, since they were allowed to live in the temples. (C) frighten + away enemies, in particular the Spaniards. (D) legitimize his kingship, since his + father was not royal. + + A: Let''s think step by step. We refer to Wikipedia articles on prehistory for help. + Pacal built the temples as the funerary monument to legitimize his kingship. 
The + answer is (D).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_prehistory diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_accounting.yaml new file mode 100644 index 00000000..c4957a1f --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_accounting.yaml @@ -0,0 +1,47 @@ +dataset_name: professional_accounting +description: "The following are multiple choice questions (with answers) about professional\ + \ accounting.\n\nQ: An auditor traces the serial numbers on equipment to a nonissuer\u2019\ + s subledger. Which of the following management assertions is supported by this test?\n\ + (A) Valuation and allocation (B) Completeness (C) Rights and obligations (D) Presentation\ + \ and disclosure\nA: Let's think step by step. We refer to Wikipedia articles on\ + \ accounting for help. The completeness assertion is tested by tracing supporting\ + \ documents to the record entries. The answer is (B).\n\nQ: One hundred years ago,\ + \ your great-great-grandmother invested $100 at 5% yearly interest. What is the\ + \ investment worth today?\n(A) $13,000 (B) $600 (C) $15,000 (D) $28,000\nA: Let's\ + \ think step by step. We refer to Wikipedia articles on accounting for help. A $100\ + \ investment at 5% yearly interest is worth 100*(1.05)^100=13150 after 100 years,\ + \ which is around $13,000. The answer is (A).\n\nQ: On January 1, year 1, Alpha\ + \ Co. signed an annual maintenance agreement with a software provider for $15,000\ + \ and the maintenance period begins on March 1, year 1. Alpha also incurred $5,000\ + \ of costs on January 1, year 1, related to software modification requests that\ + \ will increase the functionality of the software. Alpha depreciates and amortizes\ + \ its computer and software assets over five years using the straight-line method.\ + \ What amount is the total expense that Alpha should recognize related to the maintenance\ + \ agreement and the software modifications for the year ended December 31, year\ + \ 1?\n(A) $5,000 (B) $13,500 (C) $16,000 (D) $20,000\nA: Let's think step by step.\ + \ We refer to Wikipedia articles on accounting for help. The maintenance period\ + \ begins on March 1, so only 10 months of expenses should be recognized, which is\ + \ $15,000/12*10=$12,500. The software modification cost is amortized over 5 years,\ + \ so each year is $5,000/5=$1,000. So the total expense is $12,500+$1,000=$13,500.\ + \ The answer is (B).\n\nQ: Krete is an unmarried taxpayer with income exclusively\ + \ from wages. By December 31, year 1, Krete's employer has withheld $16,000 in federal\ + \ income taxes and Krete has made no estimated tax payments. On April 15, year 2,\ + \ Krete timely filed for an extension request to file her individual tax return,\ + \ and paid $300 of additional taxes. Krete's year 1 tax liability was $16,500 when\ + \ she timely filed her return on April 30, year 2, and paid the remaining tax liability\ + \ balance. What amount would be subject to the penalty for underpayment of estimated\ + \ taxes?\n(A) $0 (B) $500 (C) $1,650 (D) $16,500\nA: Let's think step by step. We\ + \ refer to Wikipedia articles on accounting for help. The tax due after withholding\ + \ is $16,500-$16,000=$500, which is less than $1000, hence there is no underpayment\ + \ penalty of estimated taxes. 
The answer is (A).\n\nQ: Box a nongovernmental not-for-profit\ + \ organization had the following transactions during the year: Proceeds from sale\ + \ of investments $80000 Purchase of property plant and equipment $10000 Proceeds\ + \ from long-term debt $100000 Loss on sale of investment $5000 What amount should\ + \ be reported as net cash provided by financing activities in Box's statement of\ + \ cash flows?\n(A) $70,000 (B) $75,000 (C) $80,000 (D) 100000\nA: Let's think step\ + \ by step. We refer to Wikipedia articles on accounting for help. Among the four\ + \ transactions, only Proceeds from long-term debt belongs to the financing activities\ + \ section of cashflow, hence the amount reported should be $100000. The answer is\ + \ (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_professional_accounting diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_law.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_law.yaml new file mode 100644 index 00000000..f3a957db --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_law.yaml @@ -0,0 +1,105 @@ +dataset_name: professional_law +description: "The following are multiple choice questions (with answers) about professional\ + \ law.\n\nQ: A son owed a creditor $5,000. The son's father contacted the creditor\ + \ and told him that he wanted to pay the son's debt. The father signed a document\ + \ that stated the father would pay the son's debt at a rate of $500 a month for\ + \ 10 months. The creditor made no written or oral commitment to forbear to sue the\ + \ son to collect the $5,000 debt, and the father made no oral or written request\ + \ for any such forbearance. For the next five months, the father made and the creditor\ + \ accepted the $500 monthly payments as agreed. During that period, the creditor,\ + \ in fact, did forbear to take any legal action against the son. However, the father\ + \ then informed the creditor that he would make no further payments on the debt.\ + \ Which of the following is the most persuasive argument that the father is liable\ + \ to the creditor under the terms of their agreement?\n(A) The father's promise\ + \ and the creditor's reliance thereon, if proved, gave rise to a valid claim by\ + \ the creditor against the father based on the doctrine of promissory estoppel.\ + \ (B) Because it was foreseeable that the father's promise would induce the creditor\ + \ to forbear taking any action against the son, such forbearance was, as a matter\ + \ of law, a bargained-for consideration for the father's promise. (C) The father's\ + \ five payments to the creditor totaling $2,500 manifested a serious intent on the\ + \ father's part to be contractually bound, and such manifestation is generally recognized\ + \ as an effective substitute for consideration. (D) By assuming the antecedent debt\ + \ obligation that the son owed to the creditor, the father became a surety whose\ + \ promise to the creditor was enforceable, since it was in writing and supported\ + \ by adequate consideration. \nA: Let's think step by step. We refer to Wikipedia\ + \ articles on law for help. The doctrine of promissory estoppel stops a person from\ + \ going back on a promise in contract law, hence option (A) should be the most persuasive\ + \ argument. The answer is (A).\n\nQ: A state has recently enacted a statute prohibiting\ + \ the disposal of any nuclear wastes within the state. This law does not contravene\ + \ or conflict with any federal statutes. 
A man operates a company in the state that\ + \ is engaged in the disposal of nuclear wastes. Subsequent to the passage of the\ + \ state statute, the man, not yet aware of the new law, entered into contracts with\ + \ many out-of-state firms to dispose of their nuclear wastes in the state. On account\ + \ of this new law, however, the man will be unable to perform these contracts. Assume\ + \ that the man has standing to challenge this state law. Which of the following\ + \ presents his strongest constitutional grounds to challenge the state law prohibiting\ + \ the disposal of nuclear wastes within the state?\n(A) The commerce clause. (B)\ + \ The equal protection clause of the Fourteenth Amendment. (C) The privileges and\ + \ immunities clause of Article IV, Section 2. (D) The contract clause.\nA: Let's\ + \ think step by step. We refer to Wikipedia articles on law for help. The commerce\ + \ clause states that Congress shall have the power to regulate commerce with foreign\ + \ Nations, and among the several States, and with the Indian Tribes. The statute\ + \ affects inter-state commerce which puts it into question. Hence the man's strongest\ + \ argument should be the commerce clause. The answer is (A).\n\nQ: On October 1,\ + \ 1980, a developer, owner of several hundred acres in a rural county, drafted a\ + \ general development plan for the area. The duly recorded plan imposed elaborate\ + \ limitations and restrictions upon the land in the plan, which was to be developed\ + \ as a residential district. The restrictions were to extend to all persons acquiring\ + \ any of the lots and to their heirs, assigns, and lessees. It was further provided\ + \ that all subsequent owners would be charged with due notice of the restrictions.\ + \ Among those restrictions in the general plan were the following:(22) A franchise\ + \ right is created in a strip of land 10 feet in width along the rear of each lot\ + \ for the use of public utility companies with right of ingress and egress. (23)\ + \ No house or structure of any kind shall be built on the aforementioned strip of\ + \ land running through the said blocks. In 2000, a retiree purchased one of the\ + \ lots, built a house, and erected a fence in the rear of his property within the\ + \ restricted area. In 2004, a teacher purchased a lot adjacent to the retiree's\ + \ property and built a new house. Two years later, a librarian purchased the lot\ + \ that adjoined the teacher's property. The three deeds to those properties each\ + \ contained references to the deed book where the general plan was recorded. In\ + \ 2008, the librarian began the construction of a seven-foot post-and-rail fence\ + \ along the line dividing his lot with the teacher's, and along the center of the\ + \ area subject to the franchise right. Although the teacher objected to its construction,\ + \ the fence was completed. If the teacher seeks a mandatory injunction to compel\ + \ removal of the librarian's fence, the court will most likely\n(A) grant relief,\ + \ because the fence was in violation of the easement restriction. (B) grant relief,\ + \ because the encroachment of the fence violated the restriction in the original\ + \ plan. (C) deny relief, because the teacher failed to enforce the restriction against\ + \ the retiree. (D) deny relief, because the fence would not be construed as \"a\ + \ structure\" within the terms of the restriction. \nA: Let's think step by step.\ + \ We refer to Wikipedia articles on law for help. 
The restrictions in the original\ + \ plan say no house or structure of any kind shall be built on the aforementioned\ + \ strip of land running through the said blocks. Hence the court will most likely\ + \ grant relief because the fence violated the restriction in the original plan.\ + \ The answer is (B).\n\nQ: Judge took judicial notice of some facts at the beginning\ + \ of the trial. Which of the following is not an appropriate kind of fact for judicial\ + \ notice?\n(A) Indisputable facts. (B) Facts that have been asserted by individual\ + \ political organizations. (C) Facts recognized to be true by common knowledge.\ + \ (D) Facts capable of scientific verification.\nA: Let's think step by step. We\ + \ refer to Wikipedia articles on law for help. Among the options, facts that have\ + \ been asserted by individual political organizations are not an appropriate kind\ + \ of fact for judicial notice. The answer is (B).\n\nQ: A state legislature has\ + \ recently enacted a statute making it a misdemeanor to curse or revile or use obscene\ + \ or opprobrious language toward or in reference to a police officer perfonning\ + \ his duties. A student at a state university organized a demonstration on campus\ + \ to protest the war. The rally was attended by a group of 50 students who shouted\ + \ anti-war messages at cars passing by. To show his contempt for the United States,\ + \ the student sewed the American flag to the rear of his jeans. When a police officer\ + \ saw the flag sown on the student's jeans, he approached and told him to remove\ + \ the flag or he would be placed under arrest. The student became angered and shouted\ + \ at the police officer, \"Listen, you bastard, I'll wear this rag anywhere I please.\ + \ \" The student was subsequently placed under arrest and charged with violating\ + \ the state statute. The student subsequently brings suit in state court challenging\ + \ the constitutionality of the statute. The strongest constitutional argument for\ + \ the student is that\n(A) the statute is void for vagueness under the Fourteenth\ + \ Amendment's due process clause. (B) the statute is invalid because it violates\ + \ the petitioner's freedom of speech under the First Amendment. (C) the statute\ + \ is an abridgment of freedom of speech under the First Amendment because less restrictive\ + \ means are available for achieving the same purpose. (D) the statute is overbroad\ + \ and consequently invalid under the First and Fourteenth Amendments.\nA: Let's\ + \ think step by step. We refer to Wikipedia articles on law for help. The Fourteenth\ + \ Amendment further supports the First Amendment by establishing a due process clause.\ + \ Hence the strongest argument should be that the statute is overbroad and consequently\ + \ invalid under the First and Fourteenth Amendments. The answer is (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_professional_law diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_medicine.yaml new file mode 100644 index 00000000..89b890f8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_medicine.yaml @@ -0,0 +1,69 @@ +dataset_name: professional_medicine +description: "The following are multiple choice questions (with answers) about professional\ + \ medicine.\n\nQ: A 22-year-old male marathon runner presents to the office with\ + \ the complaint of right-sided rib pain when he runs long distances.
Physical examination\ + \ reveals normal heart and lung findings and an exhalation dysfunction at ribs\_\ + 4-5 on the right. Which of the following muscles or muscle groups will be most useful\ + \ in correcting this dysfunction utilizing a direct method?\n(A) anterior scalene\ + \ (B) latissimus dorsi (C) pectoralis minor (D) quadratus lumborum\nA: Let's think\ + \ step by step. We refer to Wikipedia articles on medicine for help. Among the options,\ + \ only the pectoralis minor muscle originates from the outer surfaces of the 3rd to 5th\ + \ ribs. The answer is (C).\n\nQ: A 36-year-old male presents to the office with\ + \ a\_3-week\_history of low back pain. He denies any recent trauma but says that\ + \ he climbs in and out of his truck numerous times a day for his job. Examination\ + \ of the patient in the prone position reveals a deep sacral sulcus on the left,\ + \ a posterior inferior lateral angle on the right, and a lumbosacral junction that\ + \ springs freely on compression. The most likely diagnosis is\n(A) left-on-left\ + \ sacral torsion (B) left-on-right sacral torsion (C) right unilateral sacral flexion\ + \ (D) right-on-right sacral torsion\nA: Let's think step by step. We refer to Wikipedia\ + \ articles on medicine for help. The deep sulcus on the left, a posterior ILA on\ + \ the right, with a negative spring test suggests a right-on-right sacral torsion.\ + \ All other options have a deep sulcus on the right. The answer is (D).\n\nQ: A\ + \ 44-year-old man comes to the office because of a 3-day history of sore throat,\ + \ nonproductive cough, runny nose, and frontal headache. He says the headache is\ + \ worse in the morning and ibuprofen does provide some relief. He has not had shortness\ + \ of breath. Medical history is unremarkable. He takes no medications other than\ + \ the ibuprofen for pain. Vital signs are temperature 37.4\xB0C (99.4\xB0F), pulse\ + \ 88/min, respirations 18/min, and blood pressure 120/84 mm Hg. Examination of the\ + \ nares shows erythematous mucous membranes. Examination of the throat shows erythema\ + \ and follicular lymphoid hyperplasia on the posterior oropharynx. There is no palpable\ + \ cervical adenopathy. Lungs are clear to auscultation. Which of the following is\ + \ the most likely cause of this patient's symptoms?\n(A) Allergic rhinitis (B) Epstein-Barr\ + \ virus (C) Mycoplasma pneumonia (D) Rhinovirus\nA: Let's think step by step. We\ + \ refer to Wikipedia articles on medicine for help. The symptoms, especially the\ + \ headache, suggest that the most likely cause is Rhinovirus. Epstein-Barr virus\ + \ will cause swollen lymph nodes but there is no palpable cervical adenopathy. Lungs\ + \ are clear to auscultation suggests it's not Mycoplasma pneumonia. The answer is\ + \ (D).\n\nQ: A previously healthy 32-year-old woman comes to the physician 8 months\ + \ after her husband was killed in a car crash. Since that time, she has had a decreased\ + \ appetite and difficulty falling asleep. She states that she is often sad and cries\ + \ frequently. She has been rechecking the door lock five times before leaving her\ + \ house and has to count exactly five pieces of toilet paper before she uses it.\ + \ She says that she has always been a perfectionist but these urges and rituals\ + \ are new. Pharmacotherapy should be targeted to which of the following neurotransmitters?\n\ + (A) Dopamine (B) Glutamate (C) Norepinephrine (D) Serotonin\nA: Let's think step\ + \ by step. We refer to Wikipedia articles on medicine for help.
The patient feels\ + \ sad and among the options, only Dopamine and Serotonin can help increase positive\ + \ emotions. Serotonin also affects digestion and metabolism, which can help the\ + \ patient's decreased appetite and sleep difficulty. The answer is (D).\n\nQ: A\ + \ 42-year-old man comes to the office for preoperative evaluation prior to undergoing\ + \ adrenalectomy scheduled in 2 weeks. One month ago, he received care in the emergency\ + \ department for pain over his right flank following a motor vehicle collision.\ + \ At that time, blood pressure was 160/100 mm Hg and CT scan of the abdomen showed\ + \ an incidental 10-cm left adrenal mass. Results of laboratory studies, including\ + \ complete blood count, serum electrolyte concentrations, and liver function tests,\ + \ were within the reference ranges. The patient otherwise had been healthy and had\ + \ never been told that he had elevated blood pressure. He takes no medications.\ + \ A follow-up visit in the office 2 weeks ago disclosed elevated urinary normetanephrine\ + \ and metanephrine and plasma aldosterone concentrations. The patient was referred\ + \ to a surgeon, who recommended the adrenalectomy. Today, vital signs are temperature\ + \ 36.6\xB0C (97.9\xB0F), pulse 100/min, respirations 14/min, and blood pressure\ + \ 170/95 mm Hg. Physical examination discloses no significant findings. Initial\ + \ preoperative preparation should include treatment with which of the following?\n\ + (A) Labetalol (B) A loading dose of potassium chloride (C) Nifedipine (D) Phenoxybenzamine\n\ + A: Let's think step by step. We refer to Wikipedia articles on medicine for help.\ + \ The symptoms and the adrenal mass suggested pheochromocytoma, and the blood pressure\ + \ indicates hypertension. Phenoxybenzamine is used to treat hypertension caused\ + \ by pheochromocytoma. The answer is (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_professional_medicine diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_psychology.yaml new file mode 100644 index 00000000..e1e5206d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_psychology.yaml @@ -0,0 +1,47 @@ +dataset_name: professional_psychology +description: "The following are multiple choice questions (with answers) about professional\ + \ psychology.\n\nQ: In the construction of a multiple regression equation for purposes\ + \ of prediction, the optimal combination of measures is one in which the predictors\n\ + (A) are uncorrelated with each other but are moderately correlated with the criterion\ + \ (B) have low correlations with each other and low correlations with the criterion\ + \ (C) are highly intercorrelated with each other and moderately correlated with\ + \ the criterion (D) have low correlations with the criterion bur are moderately\ + \ correlated with each other\nA: Let's think step by step. We refer to Wikipedia\ + \ articles on psychology for help. The basis of multiple regression is to assess\ + \ the relationship between one continuous variable and a set of independent variables.\ + \ So the predictors should be uncorrelated with each other but are moderately correlated\ + \ with the criterion. The answer is (A).\n\nQ: There are three ways to measure the\ + \ Central Tendency: the Mean, the Median and the Mode. 
From your knowledge about\ + \ them, what is the mode?\n(A) less sensitive to extreme scores than the mean (B)\ + \ more useful for skewed distributions (C) sensitive to extreme values and highly\ + \ skewed distributions (D) the most frequently occurring number\nA: Let's think\ + \ step by step. We refer to Wikipedia articles on psychology for help. The definition\ + \ of mode is the most frequently occurring number. The answer is (D).\n\nQ: Carl\ + \ Jung believed that a client's transference:\n(A) is a fantasy that distracts the\ + \ client from reality. (B) represents \u201Cmixed feelings\u201D toward the therapist.\ + \ (C) \"is a form of \"\"acting out.\"\"\" (D) reflects the client\u2019s personal\ + \ and collective unconscious.\nA: Let's think step by step. We refer to Wikipedia\ + \ articles on psychology for help. Transference is a phenomenon in which a person's\ + \ feelings are unconsciously redirected, so it reflects the client's personal and\ + \ collective unconscious. The answer is (D).\n\nQ: In terms of Hofstede\u2019s (1980)\ + \ five cultural dimensions, the United States scores at the top of the scale on:\n\ + (A) individualism. (B) individualism and power distance. (C) power distance and\ + \ masculinity. (D) uncertainty avoidance.\nA: Let's think step by step. We refer\ + \ to Wikipedia articles on psychology for help. US scores highest on individualism\ + \ among the five cultural dimensions. The answer is (A).\n\nQ: One of your therapy\ + \ clients asks your advice about a good weight- reduction program. You have investigated\ + \ the programs in the community and are enrolled in the one you consider the best.\ + \ This program offers a $50 bonus to its patrons for each new person they bring\ + \ into the program. Under these circumstances, your most appropriate response would\ + \ be to\n(A) tell your client the pros and cons of each program you know about except\ + \ for the one in which you are enrolled (B) recommend to your client the program\ + \ in which you are enrolled and explain the $50 bonus you will receive (C) recommend\ + \ to your client the program in which you are enrolled and offer to have the $50\ + \ bonus credited to your client's account in the program (D) tell your client the\ + \ pros and cons of each program you know about, but do not claim the $50 bonus if\ + \ your client enrolls in your program\nA: Let's think step by step. We refer to\ + \ Wikipedia articles on psychology for help. Based on the circumstances, you should\ + \ tell your client about the pros and cons of each program, but it would be inappropriate\ + \ to receive the bonus, so you should not claim the $50 bonus. The answer is (D)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_professional_psychology diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_public_relations.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_public_relations.yaml new file mode 100644 index 00000000..be4edf98 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_public_relations.yaml @@ -0,0 +1,65 @@ +dataset_name: public_relations +description: 'The following are multiple choice questions (with answers) about public + relations. + + + Q: Earth Hour was a campaign launched by which organization? + + (A) Greenpeace (B) The UN (C) Oxfam (D) World Wildlife Fund + + A: Let''s think step by step. We refer to Wikipedia articles on public relations + for help. Earth Hour is a worldwide movement organized by the World Wildlife + Fund. The answer is (D).
+ + + Q: In issues management, what is the most proactive approach to addressing negative + or misleading information posted online about your organization? + + (A) Buy domain names that could be used by opposition groups. (B) Post anonymous + comments on blogs to combat this information. (C) Prepare a news release that discredits + the inaccurate information. (D) Make policy changes to address complaints highlighted + on these sites. + + A: Let''s think step by step. We refer to Wikipedia articles on public relations + for help. In issues management, the most proactive approach to addressing negative + or misleading information posted online is to make policy changes to address complaints + highlighted on those sites. The answer is (D). + + + Q: At which stage in the planning process would a situation analysis be carried + out? + + (A) Defining the program (B) Planning the program (C) Taking action and implementing + ideas (D) Evaluation of the program + + A: Let''s think step by step. We refer to Wikipedia articles on public relations + for help. Situation analyses are typically carried out during the planning process + stage of defining the program. The answer is (A). + + + Q: Which of these statements is true of the Vatican in 2010 at the time of the accusations + of child abuse cover-ups? + + (A) There was a coordinated media response. (B) Consistent messages were communicated. + (C) Criticisms were taken as attacks on the Catholic Church. (D) The credibility + of the Vatican was upheld. + + A: Let''s think step by step. We refer to Wikipedia articles on public relations + for help. In 2010 when there were accusations of child abuse cover-ups, the Vatican + took those criticisms as attacks on the Catholic Church. The answer is (C). + + + Q: What should a public relations media practitioner do if she does not know the + answer to a reporter''s question? + + (A) Give the reporter other information she is certain is correct. (B) Say that + the information is ''off the record'' and will be disseminated later. (C) Say ''I + don''t know'' and promise to provide the information later. (D) Say ''no comment,'' + rather than appear uninformed. + + A: Let''s think step by step. We refer to Wikipedia articles on public relations + for help. If a public relations media practitioner does not know the answer to a + reporter''s question, they should say ''I don''t know'' and offer to provide the + information later. The answer is (C).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_public_relations diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_security_studies.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_security_studies.yaml new file mode 100644 index 00000000..b08c321a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_security_studies.yaml @@ -0,0 +1,85 @@ +dataset_name: security_studies +description: "The following are multiple choice questions (with answers) about security\ + \ studies.\n\nQ: What are the frameworks of analysis within which terrorism has\ + \ been considered (as of 2020)?\n(A) Competition between larger nations has resulted\ + \ in some countries actively supporting terrorist groups to undermine the strength\ + \ of rival states. Terrorist networks are extended patronage clubs maintained and\ + \ paid for by their donor states and are conceptualised as being like state actors,\ + \ to be dealt with using military force. 
(B) Globalization has enabled the internationalization\ + \ of terrorist activities by opening up their operational space, although coordination\ + \ is still managed from a geographical base. This suggests that terrorist groups\ + \ are nationally structured which means that terrorism cannot be considered in terms\ + \ of a war to be defeated militarily without having serious implications on the\ + \ indigenous population. (C) Terrorism can be viewed as a problem to be resolved\ + \ by military means (war on terrorism), by normal police techniques (terrorism as\ + \ crime), or as a medical problem with underlying causes and symptoms (terrorism\ + \ as disease). (D) Terrorism is viewed as a criminal problem. The criminalization\ + \ of terrorism has two important implications. Firstly, it suggests that terrorism\ + \ can be eradicated - terrorists can be caught and brought to trial by normal judicial\ + \ proceedings thereby removing the threat from society - and secondly, it suggests\ + \ that preventative crime techniques are applicable to prevent its development.\n\ + A: Let's think step by step. We refer to Wikipedia articles on security studies\ + \ for help. (A) is wrong because it is not competition between larger nations that\ + \ causes terrorism. \n(B) is wrong because globalization is not the cause of terrorism.\n\ + (C) is correct because the US undertook the war on terrorism. \n(D) is wrong because\ + \ preventative crime techniques will likely not end terrorism. The answer is (C).\n\ + \nQ: Which of the following is the best lens through which to investigate the role\ + \ of child soldiers?\n(A) Child soldiers are victims of combat that need re-education\ + \ and rehabilitation. (B) Children and their mothers are not active subjects in\ + \ warfare and are best considered as subjects in the private sphere. (C) Children\ + \ are most often innocent bystanders in war and are best used as signifiers of peace.\ + \ (D) Children have political subjecthood that is missed when they are considered\ + \ as passive victims of warfare.\nA: Let's think step by step. We refer to Wikipedia\ + \ articles on security studies for help. Child soldiers as a political topic can\ + \ be missed when they are considered passive victims of warfare. The answer is (D).\n\ + \nQ: How can we best describe the relationship between the state-centric approach\ + \ and the concept of human security?\n(A) There are such wide divisions within the\ + \ human security framework regarding the nature of threats and referent objects\ + \ that no widely applicable comparisons between state-centric approaches and human\ + \ security can be drawn. (B) By adopting the framework of human security, the limitations\ + \ of the realist state-centric approach become evident. Whilst human security defines\ + \ the referent object as the person or population, state-centric approaches prioritise\ + \ the security of the state, de-prioritizing the pursuit of human security. (C)\ + \ The state-centric approach to security is a faction of human security, usually\ + \ defined within the broad school of human security. By being state-centric this\ + \ approach prioritises the individual as the referent object in security studies.\ + \ (D) Both the state-centric and human-centric approaches to security are mutually\ + \ exclusive and offer a sufficient analytic framework with which to understand the\ + \ international security system.
It is therefore the role of security analysts to\ + \ determine which of these substantial concepts is correct, and which should be\ + \ discarded.\nA: Let's think step by step. We refer to Wikipedia articles on security\ + \ studies for help. Human security focuses on a person or population whereas state-centric\ + \ approaches focus on the state while deprioritizing human security. The answer\ + \ is (B).\n\nQ: In order to become securitized, a threat must be presented in which\ + \ of these ways?\n(A) As an existential threat that requires immediate and extraordinary\ + \ action, posing a threat to the survival of the state or to societal security.\ + \ (B) As requiring immediate and extraordinary action by the state, threatening\ + \ the survival of a referent object and therefore warranting the use of measures\ + \ not normally employed in the political realm. (C) As an urgent threat to the survival\ + \ of the referent object, so serious that it legitimises the employment of extraordinary\ + \ action in response. (D) As an urgent threat to the survival of the audience that\ + \ requires extraordinary or emergency measures.\nA: Let's think step by step. We\ + \ refer to Wikipedia articles on security studies for help. To be securitized, a\ + \ threat must be an urgent threat to the survival of the referent object. The answer\ + \ is (C).\n\nQ: What distinguishes coercive diplomacy from military force?\n(A)\ + \ Compellence is another term for coercive diplomacy, but covering a narrower set\ + \ of criteria; compellence covers those threats aimed at initiating adversary action.\ + \ A threat to coerce a state to give up part of its territory would count as coercive\ + \ diplomacy, as long as that threat proactively initiates action before reactive\ + \ diplomacy is taken. (B) Coercive diplomacy constitutes the threats of limited\ + \ force to induce adversary's incentive to comply with the coercer's demands. It\ + \ is an influence strategy that is intended to obtain compliance: the use of force\ + \ to defeat an opponent first does not count. It leaves an element of choice with\ + \ the target to comply, or to continue. (C) Military force, or the threat of military\ + \ force, utilises fear to achieve strategic objectives. Coercive diplomacy is differentiated\ + \ from this approach, because it does not use fear as a tool for coercing an adversary.\ + \ (D) Coercive diplomacy is employed to use force but to limit its effects on the\ + \ international community. Coercive diplomacy is an aggressive strategy that is\ + \ intended to obtain compliance through defeat. It does not leave an element of\ + \ choice with the target, the target either being forced to comply or engage in\ + \ conflict. It seeks to control by imposing compliance by removing any opportunity\ + \ for negotiation or concession.\nA: Let's think step by step. We refer to Wikipedia\ + \ articles on security studies for help. Coercive diplomacy uses the threat of force\ + \ to induce the opponent to comply with demands. The answer is (B)." +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_security_studies diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_sociology.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_sociology.yaml new file mode 100644 index 00000000..38974b00 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_sociology.yaml @@ -0,0 +1,67 @@ +dataset_name: sociology +description: 'The following are multiple choice questions (with answers) about sociology. 
+ + + Q: Which of the following is not a problem associated with official statistics on + strike action? + + (A) most strikes go unnoticed by employers and the mass media (B) not all industrial + disputes will be reported by the employer (C) the definition of strikes excludes + those that involve fewer than ten workers or last less than one day (D) it is hard + to compare strikes that were measured in different ways + + A: Let''s think step by step. We refer to Wikipedia articles on sociology for help. + Official statistics on strike action can be problematic because not all industrial + disputes will be reported by employers, the definition of strikes excludes those + that involve fewer than ten workers or last less than one day, and it is hard to + compare strikes that were measured in different ways. Thus, (A) is not a problem + associated with official statistics on strike action. The answer is (A). + + + Q: What does Berger (1963) describe as a metaphor for social reality? + + (A) a fairground ride (B) a circus (C) a puppet theatre (D) a ballet + + A: Let''s think step by step. We refer to Wikipedia articles on sociology for help. + Berger describes social reality using the metaphor of a puppet theatre. The answer + is (C). + + + Q: The term ''hegemony'' refers to: + + (A) the tendency for the working class not to realize their own interests (B) a + dominant ideology that legitimates economic, political and cultural power (C) a + form of dual consciousness based on ideology and everyday experiences (D) a mode + of payment given for outstanding topiary + + A: Let''s think step by step. We refer to Wikipedia articles on sociology for help. + Hegemony refers to a dominant ideology that legitimates economic, political, and + cultural power. The answer is (B). + + + Q: The shift from ''civil religion'' to ''common religion'' means that: + + (A) the increasing bureaucracy of the state has made religion only a marginal part + of our lives (B) despite the weakening of traditional authority, our everyday lives + and ''common sense'' remain shaped by religious beliefs and values (C) religious + participation in collective worship may have declined, but people still practise + their faiths in private (D) people are much more likely to discuss their religious + beliefs in public, informal settings + + A: Let''s think step by step. We refer to Wikipedia articles on sociology for help. + The shift from civil religion to common religion means that despite the weakening + of traditional authority, our everyday lives and common sense remain shaped by religious + beliefs and values. The answer is (B). + + + Q: Which of the following did the post-war welfare state of 1948 not aim to provide: + + (A) free health care and education for all (B) a minimum wage (C) full employment + (D) universal welfare + + A: Let''s think step by step. We refer to Wikipedia articles on sociology for help. + The post-war welfare state of 1948 aimed to provide free healthcare and education, + full employment, and universal welfare. But it did not aim to provide a minimum + wage. The answer is (B).'
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_sociology diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_us_foreign_policy.yaml new file mode 100644 index 00000000..6340aee3 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_us_foreign_policy.yaml @@ -0,0 +1,66 @@ +dataset_name: us_foreign_policy +description: 'The following are multiple choice questions (with answers) about us + foreign policy. + + + Q: How did Donald Trump attack globalization in the 2016 campaign? + + (A) Globalization had made men like him too rich (B) Globalization only benefited + certain American states, such as New York (C) Liberal elites had encouraged globalization, + while ''ordinary Americans'' lost jobs because of it (D) Globalization encouraged + damaging trade wars + + A: Let''s think step by step. We refer to Wikipedia articles on us foreign policy + for help. Trump attacked globalization because he believed ordinary Americans lost + jobs due to it, and so he wanted to blame liberals who had encouraged it. The answer + is (C). + + + Q: How did NSC-68 change U.S. strategy? + + (A) It globalized containment. (B) It militarized containment. (C) It called for + the development of the hydrogen bomb. (D) All of the above + + A: Let''s think step by step. We refer to Wikipedia articles on us foreign policy + for help. NSC-68 outlined a variety of courses of action, including globalization + of containment, militarization of containment, and the development of the hydrogen + bomb. The answer is (D). + + + Q: How do Defensive Realism and Offensive Realism differ in their explanation of + state behaviour? + + (A) Defensive realists place greater emphasis on the role of international institutions + (B) Defensive realists place less emphasis on geographical factors (C) Offensive + realists give more priority to the national interest than Defensive realists. (D) + Defensive realists believe states are security maximizers, while Offensive realists + believe states to be power maximizers + + A: Let''s think step by step. We refer to Wikipedia articles on us foreign policy + for help. While defensive realism advocates that states are security maximizers, + offensive realists think of states as power maximizers. The answer is (D). + + + Q: The realm of policy decisions concerned primarily with relations between the + United States and the rest of the world is known as + + (A) terrorism policy. (B) economic policy. (C) foreign policy. (D) international + policy. + + A: Let''s think step by step. We refer to Wikipedia articles on us foreign policy + for help. The realm of policy decisions concerned primarily with relations between the US and + the rest of the world is known as foreign policy. The answer is (C). + + + Q: How did the 2008 financial crisis affect America''s international reputation? + + (A) It damaged support for the US model of political economy and capitalism (B) + It created anger at the United States for exaggerating the crisis (C) It increased + support for American global leadership under President Obama (D) It reduced global + use of the US dollar + + A: Let''s think step by step. We refer to Wikipedia articles on us foreign policy + for help. The 2008 financial crisis damaged the international reputation of the + American model of political economy and capitalism. The answer is (A).'
+include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_us_foreign_policy diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_virology.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_virology.yaml new file mode 100644 index 00000000..5bbd7a2c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_virology.yaml @@ -0,0 +1,55 @@ +dataset_name: virology +description: 'The following are multiple choice questions (with answers) about virology. + + + Q: The median survival time to AIDS and death was established by following: + + (A) Seroprevalent HIV-infected individuals (B) Seronegatives (C) Seroconverters + (D) High-risk seronegatives + + A: Let''s think step by step. We refer to Wikipedia articles on virology for help. + The median survival time to AIDS and death was established by following seroconverters. + The answer is (C). + + + Q: Which of the following is a morphological characteristic of the paramyxoviruses. + + (A) Fragile viruses often visualised with RNA spewing from the inside (B) Elongate + viruses (C) Icosahedral viruses with envelope (D) Very large viruses + + A: Let''s think step by step. We refer to Wikipedia articles on virology for help. + Paramyxoviruses are fragile viruses often visualised with RNA spewing from the inside. + The answer is (A). + + + Q: The most important goal of a behavioral intervention is: + + (A) Change in behavior (B) Comprehensive coverage (C) Effective use of behavioral + theory (D) Sustained behavior change + + A: Let''s think step by step. We refer to Wikipedia articles on virology for help. + The primary goal of a behavioral intervention is to cause sustained behavior change. + The answer is (D). + + + Q: A key factor facilitating the application of nested case-control studies from + the MACS was: + + (A) Data collection (B) Establishment of a repository of biologic specimens (C) + Participant interest (D) Administration of the questionnaire by staff + + A: Let''s think step by step. We refer to Wikipedia articles on virology for help. + The Multicenter AIDS Cohort Study''s use of nested case-control studies was facilitated + by the establishment of a repository of biologic specimens. The answer is (B). + + + Q: Why are parvoviruses a highly impactful parasite? + + (A) Because they have no nucleic acid (B) They require a helper virus (C) Only replicate + in dividing cells (D) Can integrate into host chromosomes + + A: Let''s think step by step. We refer to Wikipedia articles on virology for help. + Parvoviruses are highly impactful because they do not have nucleic acid. The answer + is (A).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_virology diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_world_religions.yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_world_religions.yaml new file mode 100644 index 00000000..c01adcdb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_world_religions.yaml @@ -0,0 +1,53 @@ +dataset_name: world_religions +description: 'The following are multiple choice questions (with answers) about world + religions. + + + Q: How can the Upanishads be characterized? + + (A) Ritual texts (B) Philosophical texts (C) Hymns (D) Origin stories + + A: Let''s think step by step. We refer to Wikipedia articles on world religions + for help. The Upanishads are the most recent part of the Vedas (the oldest scriptures + in Hinduism) and supplied the basis of later Hindu philosophy. So they are philosophical + texts. The answer is (B).
+ + + Q: What is the Second Gem in Buddhism? + + (A) The Dharma (B) The Sangha (C) The Buddha (D) The Bodhisattva + + A: Let''s think step by step. We refer to Wikipedia articles on world religions + for help. The Second Gem in Buddhism is The Dharma. The answer is (A). + + + Q: Which Japanese government promoted a kind of national cult based on the emperor + and his associations with kami? + + (A) Honen (B) Tanaka (C) Tokugawa (D) Meiji + + A: Let''s think step by step. We refer to Wikipedia articles on world religions + for help. The promotion of a national cult based on the emperor and his associations + with Kami happened during the reign of Emperor Meiji (1852-1912). The answer is + (D). + + + Q: In which dynasty was the "Mandate of Heaven" developed to legitimatize the new + rulers? + + (A) Shang (B) Zhou (C) Han (D) Xia + + A: Let''s think step by step. We refer to Wikipedia articles on world religions + for help. The "Mandate of Heaven" was developed as an ancient Chinese philosophical + concept during the Zhou Dynasty (1046-256 BCE). The answer is (B). + + + Q: What is the sign of the covenant for Jewish males? + + (A) The rainbow (B) Circumcision (C) A son (D) Bar mitzvah + + A: Let''s think step by step. We refer to Wikipedia articles on world religions + for help. In Judaism, the most distinctive sign of the covenant is circumcision + (brit milah). The answer is (B).' +include: _mmlu_flan_cot_fewshot_template_yaml +task: mmlu_flan_cot_fewshot_world_religions diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml new file mode 100644 index 00000000..e5b8e429 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml @@ -0,0 +1,25 @@ +group: mmlu_flan_cot_zeroshot +dataset_path: cais/mmlu +validation_split: validation +fewshot_split: dev +doc_to_text: "\n\nQ: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step." +output_type: greedy_until +fewshot_delimiter: "" +doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(.)" + - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml new file mode 100644 index 00000000..17bccf1f --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml @@ -0,0 +1,8 @@ +dataset_name: abstract_algebra +description: 'The following are multiple choice questions (with answers) about abstract + algebra. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_abstract_algebra diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml new file mode 100644 index 00000000..6e14fbc6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml @@ -0,0 +1,7 @@ +dataset_name: anatomy +description: 'The following are multiple choice questions (with answers) about anatomy. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_anatomy diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml new file mode 100644 index 00000000..b1ca9f52 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml @@ -0,0 +1,7 @@ +dataset_name: astronomy +description: 'The following are multiple choice questions (with answers) about astronomy. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_astronomy diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml new file mode 100644 index 00000000..53f3a78f --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml @@ -0,0 +1,8 @@ +dataset_name: business_ethics +description: 'The following are multiple choice questions (with answers) about business + ethics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_business_ethics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml new file mode 100644 index 00000000..f858d671 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml @@ -0,0 +1,8 @@ +dataset_name: clinical_knowledge +description: 'The following are multiple choice questions (with answers) about clinical + knowledge. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_clinical_knowledge diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml new file mode 100644 index 00000000..93471b6a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml @@ -0,0 +1,8 @@ +dataset_name: college_biology +description: 'The following are multiple choice questions (with answers) about college + biology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_college_biology diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml new file mode 100644 index 00000000..5f619baa --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml @@ -0,0 +1,8 @@ +dataset_name: college_chemistry +description: 'The following are multiple choice questions (with answers) about college + chemistry. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_college_chemistry diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml new file mode 100644 index 00000000..865b91bf --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml @@ -0,0 +1,8 @@ +dataset_name: college_computer_science +description: 'The following are multiple choice questions (with answers) about college + computer science. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_college_computer_science diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml new file mode 100644 index 00000000..1f8a89fa --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: college_mathematics +description: 'The following are multiple choice questions (with answers) about college + mathematics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_college_mathematics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml new file mode 100644 index 00000000..e852c64b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml @@ -0,0 +1,8 @@ +dataset_name: college_medicine +description: 'The following are multiple choice questions (with answers) about college + medicine. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_college_medicine diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml new file mode 100644 index 00000000..f215c2f0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: college_physics +description: 'The following are multiple choice questions (with answers) about college + physics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_college_physics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml new file mode 100644 index 00000000..402f7bdc --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml @@ -0,0 +1,8 @@ +dataset_name: computer_security +description: 'The following are multiple choice questions (with answers) about computer + security. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_computer_security diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml new file mode 100644 index 00000000..c3ad6376 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: conceptual_physics +description: 'The following are multiple choice questions (with answers) about conceptual + physics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_conceptual_physics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml new file mode 100644 index 00000000..dad5a83b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml @@ -0,0 +1,7 @@ +dataset_name: econometrics +description: 'The following are multiple choice questions (with answers) about econometrics. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_econometrics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml new file mode 100644 index 00000000..72a08dca --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml @@ -0,0 +1,8 @@ +dataset_name: electrical_engineering +description: 'The following are multiple choice questions (with answers) about electrical + engineering. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_electrical_engineering diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml new file mode 100644 index 00000000..0531f23e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: elementary_mathematics +description: 'The following are multiple choice questions (with answers) about elementary + mathematics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_elementary_mathematics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml new file mode 100644 index 00000000..80b26401 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml @@ -0,0 +1,8 @@ +dataset_name: formal_logic +description: 'The following are multiple choice questions (with answers) about formal + logic. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_formal_logic diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml new file mode 100644 index 00000000..491d0db4 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml @@ -0,0 +1,8 @@ +dataset_name: global_facts +description: 'The following are multiple choice questions (with answers) about global + facts. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_global_facts diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml new file mode 100644 index 00000000..32da2e26 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_biology +description: 'The following are multiple choice questions (with answers) about high + school biology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_biology diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml new file mode 100644 index 00000000..5968e54e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_chemistry +description: 'The following are multiple choice questions (with answers) about high + school chemistry. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_chemistry diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml new file mode 100644 index 00000000..2666de90 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_computer_science +description: 'The following are multiple choice questions (with answers) about high + school computer science. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_computer_science diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml new file mode 100644 index 00000000..fb59ada4 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_european_history +description: 'The following are multiple choice questions (with answers) about high + school european history. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_european_history diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml new file mode 100644 index 00000000..ed3fca55 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_geography +description: 'The following are multiple choice questions (with answers) about high + school geography. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_geography diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml new file mode 100644 index 00000000..62803b4b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_government_and_politics +description: 'The following are multiple choice questions (with answers) about high + school government and politics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_government_and_politics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml new file mode 100644 index 00000000..f973b58d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_macroeconomics +description: 'The following are multiple choice questions (with answers) about high + school macroeconomics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_macroeconomics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml new file mode 100644 index 00000000..550dfcf1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_mathematics +description: 'The following are multiple choice questions (with answers) about high + school mathematics. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_mathematics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml new file mode 100644 index 00000000..8a1e4c4c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_microeconomics +description: 'The following are multiple choice questions (with answers) about high + school microeconomics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_microeconomics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml new file mode 100644 index 00000000..4997e712 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_physics +description: 'The following are multiple choice questions (with answers) about high + school physics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_physics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml new file mode 100644 index 00000000..a3e801ca --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_psychology +description: 'The following are multiple choice questions (with answers) about high + school psychology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_psychology diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml new file mode 100644 index 00000000..d057cbef --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_statistics +description: 'The following are multiple choice questions (with answers) about high + school statistics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_statistics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml new file mode 100644 index 00000000..583d9591 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_us_history +description: 'The following are multiple choice questions (with answers) about high + school us history. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_us_history diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml new file mode 100644 index 00000000..40445582 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_world_history +description: 'The following are multiple choice questions (with answers) about high + school world history. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_high_school_world_history diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml new file mode 100644 index 00000000..c6db4c1c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml @@ -0,0 +1,8 @@ +dataset_name: human_aging +description: 'The following are multiple choice questions (with answers) about human + aging. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_human_aging diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_sexuality.yaml new file mode 100644 index 00000000..41795660 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_sexuality.yaml @@ -0,0 +1,8 @@ +dataset_name: human_sexuality +description: 'The following are multiple choice questions (with answers) about human + sexuality. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_human_sexuality diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml new file mode 100644 index 00000000..da1273b0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml @@ -0,0 +1,8 @@ +dataset_name: international_law +description: 'The following are multiple choice questions (with answers) about international + law. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_international_law diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml new file mode 100644 index 00000000..e1a6a28b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml @@ -0,0 +1,7 @@ +dataset_name: jurisprudence +description: 'The following are multiple choice questions (with answers) about jurisprudence. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_jurisprudence diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml new file mode 100644 index 00000000..e94cde17 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml @@ -0,0 +1,8 @@ +dataset_name: logical_fallacies +description: 'The following are multiple choice questions (with answers) about logical + fallacies. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_logical_fallacies diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml new file mode 100644 index 00000000..a17387bd --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml @@ -0,0 +1,8 @@ +dataset_name: machine_learning +description: 'The following are multiple choice questions (with answers) about machine + learning. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_machine_learning diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml new file mode 100644 index 00000000..68fc6ba2 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml @@ -0,0 +1,7 @@ +dataset_name: management +description: 'The following are multiple choice questions (with answers) about management. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_management diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml new file mode 100644 index 00000000..f6c6444c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml @@ -0,0 +1,7 @@ +dataset_name: marketing +description: 'The following are multiple choice questions (with answers) about marketing. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_marketing diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml new file mode 100644 index 00000000..2490826b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml @@ -0,0 +1,8 @@ +dataset_name: medical_genetics +description: 'The following are multiple choice questions (with answers) about medical + genetics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_medical_genetics diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml new file mode 100644 index 00000000..5aebaef8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml @@ -0,0 +1,7 @@ +dataset_name: miscellaneous +description: 'The following are multiple choice questions (with answers) about miscellaneous. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_miscellaneous diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml new file mode 100644 index 00000000..85829454 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml @@ -0,0 +1,8 @@ +dataset_name: moral_disputes +description: 'The following are multiple choice questions (with answers) about moral + disputes. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_moral_disputes diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml new file mode 100644 index 00000000..f8a31ddc --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml @@ -0,0 +1,8 @@ +dataset_name: moral_scenarios +description: 'The following are multiple choice questions (with answers) about moral + scenarios. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_moral_scenarios diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_nutrition.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_nutrition.yaml new file mode 100644 index 00000000..238c3f1c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_nutrition.yaml @@ -0,0 +1,7 @@ +dataset_name: nutrition +description: 'The following are multiple choice questions (with answers) about nutrition. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_nutrition diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml new file mode 100644 index 00000000..c4a8fb47 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml @@ -0,0 +1,7 @@ +dataset_name: philosophy +description: 'The following are multiple choice questions (with answers) about philosophy. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_philosophy diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml new file mode 100644 index 00000000..07f31813 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml @@ -0,0 +1,7 @@ +dataset_name: prehistory +description: 'The following are multiple choice questions (with answers) about prehistory. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_prehistory diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml new file mode 100644 index 00000000..82b5ff2c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_accounting +description: 'The following are multiple choice questions (with answers) about professional + accounting. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_professional_accounting diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml new file mode 100644 index 00000000..32210b49 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_law +description: 'The following are multiple choice questions (with answers) about professional + law. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_professional_law diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml new file mode 100644 index 00000000..ed9eebe1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_medicine +description: 'The following are multiple choice questions (with answers) about professional + medicine. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_professional_medicine diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml new file mode 100644 index 00000000..7110b840 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_psychology +description: 'The following are multiple choice questions (with answers) about professional + psychology. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_professional_psychology diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml new file mode 100644 index 00000000..5138cdd8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml @@ -0,0 +1,8 @@ +dataset_name: public_relations +description: 'The following are multiple choice questions (with answers) about public + relations. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_public_relations diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml new file mode 100644 index 00000000..84c359d7 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml @@ -0,0 +1,8 @@ +dataset_name: security_studies +description: 'The following are multiple choice questions (with answers) about security + studies. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_security_studies diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml new file mode 100644 index 00000000..fed1dc49 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml @@ -0,0 +1,7 @@ +dataset_name: sociology +description: 'The following are multiple choice questions (with answers) about sociology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_sociology diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml new file mode 100644 index 00000000..d94f60e9 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml @@ -0,0 +1,8 @@ +dataset_name: us_foreign_policy +description: 'The following are multiple choice questions (with answers) about us + foreign policy. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_us_foreign_policy diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml new file mode 100644 index 00000000..feaa8b06 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml @@ -0,0 +1,7 @@ +dataset_name: virology +description: 'The following are multiple choice questions (with answers) about virology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_cot_zeroshot_virology diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml new file mode 100644 index 00000000..fe2b4c42 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml @@ -0,0 +1,8 @@ +dataset_name: world_religions +description: 'The following are multiple choice questions (with answers) about world + religions. 
+
+
+  '
+include: _mmlu_flan_generative_template_yaml
+task: mmlu_flan_cot_zeroshot_world_religions
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
new file mode 100644
index 00000000..b369024c
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
@@ -0,0 +1,18 @@
+group: mmlu_flan
+dataset_path: cais/mmlu
+validation_split: validation
+fewshot_split: dev
+doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA:"
+output_type: greedy_until
+doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}"
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+generation_kwargs:
+  until:
+    - "</s>"
+  do_sample: false
+  temperature: 0.0
\ No newline at end of file
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
new file mode 100644
index 00000000..eb38e0fe
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
@@ -0,0 +1,12 @@
+group: mmlu_flan_loglikelihood
+dataset_path: cais/mmlu
+validation_split: validation
+fewshot_split: dev
+doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA:"
+output_type: multiple_choice
+doc_to_choice: ['(A)', '(B)', '(C)', '(D)']
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml
new file mode 100644
index 00000000..9fca2117
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml
@@ -0,0 +1,8 @@
+dataset_name: abstract_algebra
+description: 'The following are multiple choice questions (with answers) about abstract
+  algebra.
+
+
+  '
+include: _mmlu_flan_generative_template_yaml
+task: mmlu_flan_n_shot_abstract_algebra
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml
new file mode 100644
index 00000000..e8978402
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml
@@ -0,0 +1,7 @@
+dataset_name: anatomy
+description: 'The following are multiple choice questions (with answers) about anatomy.
+
+
+  '
+include: _mmlu_flan_generative_template_yaml
+task: mmlu_flan_n_shot_anatomy
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml
new file mode 100644
index 00000000..66902758
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml
@@ -0,0 +1,7 @@
+dataset_name: astronomy
+description: 'The following are multiple choice questions (with answers) about astronomy.
+
+
+  '
+include: _mmlu_flan_generative_template_yaml
+task: mmlu_flan_n_shot_astronomy
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml
new file mode 100644
index 00000000..f75a48fd
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml
@@ -0,0 +1,8 @@
+dataset_name: business_ethics
+description: 'The following are multiple choice questions (with answers) about business
+  ethics.
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_business_ethics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml new file mode 100644 index 00000000..07a3fe79 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml @@ -0,0 +1,8 @@ +dataset_name: clinical_knowledge +description: 'The following are multiple choice questions (with answers) about clinical + knowledge. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_clinical_knowledge diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml new file mode 100644 index 00000000..7465f0d3 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml @@ -0,0 +1,8 @@ +dataset_name: college_biology +description: 'The following are multiple choice questions (with answers) about college + biology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_college_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml new file mode 100644 index 00000000..17e7fbde --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml @@ -0,0 +1,8 @@ +dataset_name: college_chemistry +description: 'The following are multiple choice questions (with answers) about college + chemistry. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_college_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml new file mode 100644 index 00000000..d0032874 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml @@ -0,0 +1,8 @@ +dataset_name: college_computer_science +description: 'The following are multiple choice questions (with answers) about college + computer science. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_college_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml new file mode 100644 index 00000000..be1e01b2 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: college_mathematics +description: 'The following are multiple choice questions (with answers) about college + mathematics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_college_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml new file mode 100644 index 00000000..4c8aa79e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml @@ -0,0 +1,8 @@ +dataset_name: college_medicine +description: 'The following are multiple choice questions (with answers) about college + medicine. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_college_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml new file mode 100644 index 00000000..cd07980b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: college_physics +description: 'The following are multiple choice questions (with answers) about college + physics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_college_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml new file mode 100644 index 00000000..93dc9040 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml @@ -0,0 +1,8 @@ +dataset_name: computer_security +description: 'The following are multiple choice questions (with answers) about computer + security. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_computer_security diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml new file mode 100644 index 00000000..2f313298 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: conceptual_physics +description: 'The following are multiple choice questions (with answers) about conceptual + physics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_conceptual_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml new file mode 100644 index 00000000..b46c90cb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml @@ -0,0 +1,7 @@ +dataset_name: econometrics +description: 'The following are multiple choice questions (with answers) about econometrics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_econometrics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml new file mode 100644 index 00000000..5d1ccca1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml @@ -0,0 +1,8 @@ +dataset_name: electrical_engineering +description: 'The following are multiple choice questions (with answers) about electrical + engineering. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_electrical_engineering diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml new file mode 100644 index 00000000..7260b752 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: elementary_mathematics +description: 'The following are multiple choice questions (with answers) about elementary + mathematics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_elementary_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml new file mode 100644 index 00000000..7dd42af5 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml @@ -0,0 +1,8 @@ +dataset_name: formal_logic +description: 'The following are multiple choice questions (with answers) about formal + logic. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_formal_logic diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml new file mode 100644 index 00000000..373f99fb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml @@ -0,0 +1,8 @@ +dataset_name: global_facts +description: 'The following are multiple choice questions (with answers) about global + facts. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_global_facts diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml new file mode 100644 index 00000000..334286dc --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_biology +description: 'The following are multiple choice questions (with answers) about high + school biology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml new file mode 100644 index 00000000..f4d3bcfb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_chemistry +description: 'The following are multiple choice questions (with answers) about high + school chemistry. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml new file mode 100644 index 00000000..03e0411b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_computer_science +description: 'The following are multiple choice questions (with answers) about high + school computer science. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml new file mode 100644 index 00000000..a7a73a24 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_european_history +description: 'The following are multiple choice questions (with answers) about high + school european history. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_european_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml new file mode 100644 index 00000000..d7b29960 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_geography +description: 'The following are multiple choice questions (with answers) about high + school geography. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_geography diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml new file mode 100644 index 00000000..dbb195e2 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_government_and_politics +description: 'The following are multiple choice questions (with answers) about high + school government and politics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_government_and_politics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml new file mode 100644 index 00000000..a8e6c4cd --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_macroeconomics +description: 'The following are multiple choice questions (with answers) about high + school macroeconomics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_macroeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml new file mode 100644 index 00000000..35adc8b7 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_mathematics +description: 'The following are multiple choice questions (with answers) about high + school mathematics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml new file mode 100644 index 00000000..a0887261 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_microeconomics +description: 'The following are multiple choice questions (with answers) about high + school microeconomics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_microeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml new file mode 100644 index 00000000..63a77a7d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_physics +description: 'The following are multiple choice questions (with answers) about high + school physics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml new file mode 100644 index 00000000..7d738494 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_psychology +description: 'The following are multiple choice questions (with answers) about high + school psychology. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml new file mode 100644 index 00000000..6b3e4b5a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_statistics +description: 'The following are multiple choice questions (with answers) about high + school statistics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_statistics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml new file mode 100644 index 00000000..e80b64e1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_us_history +description: 'The following are multiple choice questions (with answers) about high + school us history. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_us_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml new file mode 100644 index 00000000..83d55ae6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_world_history +description: 'The following are multiple choice questions (with answers) about high + school world history. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_high_school_world_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml new file mode 100644 index 00000000..2ff416b8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml @@ -0,0 +1,8 @@ +dataset_name: human_aging +description: 'The following are multiple choice questions (with answers) about human + aging. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_human_aging diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml new file mode 100644 index 00000000..2d7316ac --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml @@ -0,0 +1,8 @@ +dataset_name: human_sexuality +description: 'The following are multiple choice questions (with answers) about human + sexuality. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_human_sexuality diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml new file mode 100644 index 00000000..547cb3c0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml @@ -0,0 +1,8 @@ +dataset_name: international_law +description: 'The following are multiple choice questions (with answers) about international + law. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_international_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml new file mode 100644 index 00000000..51613f16 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml @@ -0,0 +1,7 @@ +dataset_name: jurisprudence +description: 'The following are multiple choice questions (with answers) about jurisprudence. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_jurisprudence diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml new file mode 100644 index 00000000..1b8a4d2b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml @@ -0,0 +1,8 @@ +dataset_name: logical_fallacies +description: 'The following are multiple choice questions (with answers) about logical + fallacies. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_logical_fallacies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml new file mode 100644 index 00000000..60973953 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml @@ -0,0 +1,8 @@ +dataset_name: abstract_algebra +description: 'The following are multiple choice questions (with answers) about abstract + algebra. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_abstract_algebra diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml new file mode 100644 index 00000000..ff927e05 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml @@ -0,0 +1,7 @@ +dataset_name: anatomy +description: 'The following are multiple choice questions (with answers) about anatomy. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_anatomy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml new file mode 100644 index 00000000..95329c44 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml @@ -0,0 +1,7 @@ +dataset_name: astronomy +description: 'The following are multiple choice questions (with answers) about astronomy. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_astronomy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml new file mode 100644 index 00000000..3f2bcc77 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml @@ -0,0 +1,8 @@ +dataset_name: business_ethics +description: 'The following are multiple choice questions (with answers) about business + ethics. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_business_ethics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml new file mode 100644 index 00000000..780c8bf6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml @@ -0,0 +1,8 @@ +dataset_name: clinical_knowledge +description: 'The following are multiple choice questions (with answers) about clinical + knowledge. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_clinical_knowledge diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml new file mode 100644 index 00000000..7d270f47 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml @@ -0,0 +1,8 @@ +dataset_name: college_biology +description: 'The following are multiple choice questions (with answers) about college + biology. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_college_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml new file mode 100644 index 00000000..e947d1a2 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml @@ -0,0 +1,8 @@ +dataset_name: college_chemistry +description: 'The following are multiple choice questions (with answers) about college + chemistry. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_college_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml new file mode 100644 index 00000000..a23b0bd3 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml @@ -0,0 +1,8 @@ +dataset_name: college_computer_science +description: 'The following are multiple choice questions (with answers) about college + computer science. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_college_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml new file mode 100644 index 00000000..c03033eb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: college_mathematics +description: 'The following are multiple choice questions (with answers) about college + mathematics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_college_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml new file mode 100644 index 00000000..64f952bb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml @@ -0,0 +1,8 @@ +dataset_name: college_medicine +description: 'The following are multiple choice questions (with answers) about college + medicine. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_college_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml new file mode 100644 index 00000000..f339c316 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: college_physics +description: 'The following are multiple choice questions (with answers) about college + physics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_college_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml new file mode 100644 index 00000000..cc28f843 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml @@ -0,0 +1,8 @@ +dataset_name: computer_security +description: 'The following are multiple choice questions (with answers) about computer + security. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_computer_security diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml new file mode 100644 index 00000000..dc3c9096 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: conceptual_physics +description: 'The following are multiple choice questions (with answers) about conceptual + physics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_conceptual_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml new file mode 100644 index 00000000..034c0e63 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml @@ -0,0 +1,7 @@ +dataset_name: econometrics +description: 'The following are multiple choice questions (with answers) about econometrics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_econometrics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml new file mode 100644 index 00000000..20823b42 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml @@ -0,0 +1,8 @@ +dataset_name: electrical_engineering +description: 'The following are multiple choice questions (with answers) about electrical + engineering. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_electrical_engineering diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml new file mode 100644 index 00000000..afed59aa --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: elementary_mathematics +description: 'The following are multiple choice questions (with answers) about elementary + mathematics. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_elementary_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml new file mode 100644 index 00000000..2a2359f5 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml @@ -0,0 +1,8 @@ +dataset_name: formal_logic +description: 'The following are multiple choice questions (with answers) about formal + logic. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_formal_logic diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml new file mode 100644 index 00000000..4d23b227 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml @@ -0,0 +1,8 @@ +dataset_name: global_facts +description: 'The following are multiple choice questions (with answers) about global + facts. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_global_facts diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml new file mode 100644 index 00000000..a9bdefee --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_biology +description: 'The following are multiple choice questions (with answers) about high + school biology. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml new file mode 100644 index 00000000..ec512f42 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_chemistry +description: 'The following are multiple choice questions (with answers) about high + school chemistry. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml new file mode 100644 index 00000000..67d70ec6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_computer_science +description: 'The following are multiple choice questions (with answers) about high + school computer science. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml new file mode 100644 index 00000000..62c6013b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_european_history +description: 'The following are multiple choice questions (with answers) about high + school european history. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_european_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml new file mode 100644 index 00000000..a4b6d856 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_geography +description: 'The following are multiple choice questions (with answers) about high + school geography. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_geography diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml new file mode 100644 index 00000000..f7c2cb8c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_government_and_politics +description: 'The following are multiple choice questions (with answers) about high + school government and politics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_government_and_politics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml new file mode 100644 index 00000000..b623360e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_macroeconomics +description: 'The following are multiple choice questions (with answers) about high + school macroeconomics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_macroeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml new file mode 100644 index 00000000..b2ddfc0e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_mathematics +description: 'The following are multiple choice questions (with answers) about high + school mathematics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml new file mode 100644 index 00000000..e8bff48d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_microeconomics +description: 'The following are multiple choice questions (with answers) about high + school microeconomics. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_microeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml new file mode 100644 index 00000000..2b97ac1d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_physics +description: 'The following are multiple choice questions (with answers) about high + school physics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml new file mode 100644 index 00000000..fecb5f70 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_psychology +description: 'The following are multiple choice questions (with answers) about high + school psychology. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml new file mode 100644 index 00000000..4ff766db --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_statistics +description: 'The following are multiple choice questions (with answers) about high + school statistics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_statistics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml new file mode 100644 index 00000000..f725e916 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_us_history +description: 'The following are multiple choice questions (with answers) about high + school us history. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_us_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml new file mode 100644 index 00000000..0142ce33 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml @@ -0,0 +1,8 @@ +dataset_name: high_school_world_history +description: 'The following are multiple choice questions (with answers) about high + school world history. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_high_school_world_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml new file mode 100644 index 00000000..4b007ca1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml @@ -0,0 +1,8 @@ +dataset_name: human_aging +description: 'The following are multiple choice questions (with answers) about human + aging. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_human_aging diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml new file mode 100644 index 00000000..37d5e42a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml @@ -0,0 +1,8 @@ +dataset_name: human_sexuality +description: 'The following are multiple choice questions (with answers) about human + sexuality. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_human_sexuality diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml new file mode 100644 index 00000000..03987fdf --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml @@ -0,0 +1,8 @@ +dataset_name: international_law +description: 'The following are multiple choice questions (with answers) about international + law. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_international_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml new file mode 100644 index 00000000..a95f42ed --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml @@ -0,0 +1,7 @@ +dataset_name: jurisprudence +description: 'The following are multiple choice questions (with answers) about jurisprudence. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_jurisprudence diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml new file mode 100644 index 00000000..3b8b7b98 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml @@ -0,0 +1,8 @@ +dataset_name: logical_fallacies +description: 'The following are multiple choice questions (with answers) about logical + fallacies. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_logical_fallacies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml new file mode 100644 index 00000000..473a2bc9 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml @@ -0,0 +1,8 @@ +dataset_name: machine_learning +description: 'The following are multiple choice questions (with answers) about machine + learning. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_machine_learning diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml new file mode 100644 index 00000000..70eb8768 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml @@ -0,0 +1,7 @@ +dataset_name: management +description: 'The following are multiple choice questions (with answers) about management. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_management diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml new file mode 100644 index 00000000..48c03524 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml @@ -0,0 +1,7 @@ +dataset_name: marketing +description: 'The following are multiple choice questions (with answers) about marketing. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_marketing diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml new file mode 100644 index 00000000..ef221495 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml @@ -0,0 +1,8 @@ +dataset_name: medical_genetics +description: 'The following are multiple choice questions (with answers) about medical + genetics. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_medical_genetics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml new file mode 100644 index 00000000..bde2352b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml @@ -0,0 +1,7 @@ +dataset_name: miscellaneous +description: 'The following are multiple choice questions (with answers) about miscellaneous. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_miscellaneous diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml new file mode 100644 index 00000000..36ca7f98 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml @@ -0,0 +1,8 @@ +dataset_name: moral_disputes +description: 'The following are multiple choice questions (with answers) about moral + disputes. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_moral_disputes diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml new file mode 100644 index 00000000..5415d5b4 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml @@ -0,0 +1,8 @@ +dataset_name: moral_scenarios +description: 'The following are multiple choice questions (with answers) about moral + scenarios. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_moral_scenarios diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml new file mode 100644 index 00000000..34c0040a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml @@ -0,0 +1,7 @@ +dataset_name: nutrition +description: 'The following are multiple choice questions (with answers) about nutrition. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_nutrition diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml new file mode 100644 index 00000000..83588531 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml @@ -0,0 +1,7 @@ +dataset_name: philosophy +description: 'The following are multiple choice questions (with answers) about philosophy. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_philosophy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml new file mode 100644 index 00000000..a94b514b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml @@ -0,0 +1,7 @@ +dataset_name: prehistory +description: 'The following are multiple choice questions (with answers) about prehistory. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_prehistory diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml new file mode 100644 index 00000000..98fa6bd8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_accounting +description: 'The following are multiple choice questions (with answers) about professional + accounting. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_professional_accounting diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml new file mode 100644 index 00000000..aada41a7 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_law +description: 'The following are multiple choice questions (with answers) about professional + law. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_professional_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml new file mode 100644 index 00000000..3febeb67 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_medicine +description: 'The following are multiple choice questions (with answers) about professional + medicine. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_professional_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml new file mode 100644 index 00000000..33b77f62 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_psychology +description: 'The following are multiple choice questions (with answers) about professional + psychology. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_professional_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml new file mode 100644 index 00000000..dd7f9976 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml @@ -0,0 +1,8 @@ +dataset_name: public_relations +description: 'The following are multiple choice questions (with answers) about public + relations. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_public_relations diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml new file mode 100644 index 00000000..cc236bd4 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml @@ -0,0 +1,8 @@ +dataset_name: security_studies +description: 'The following are multiple choice questions (with answers) about security + studies. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_security_studies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml new file mode 100644 index 00000000..11069f9e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml @@ -0,0 +1,7 @@ +dataset_name: sociology +description: 'The following are multiple choice questions (with answers) about sociology. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_sociology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml new file mode 100644 index 00000000..42b51e5e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml @@ -0,0 +1,8 @@ +dataset_name: us_foreign_policy +description: 'The following are multiple choice questions (with answers) about us + foreign policy. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_us_foreign_policy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml new file mode 100644 index 00000000..9a9b94a1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml @@ -0,0 +1,7 @@ +dataset_name: virology +description: 'The following are multiple choice questions (with answers) about virology. + + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_virology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml new file mode 100644 index 00000000..1525efe6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml @@ -0,0 +1,8 @@ +dataset_name: world_religions +description: 'The following are multiple choice questions (with answers) about world + religions. 
+ + + ' +include: _mmlu_flan_loglikelihood_template_yaml +task: mmlu_flan_n_shot_loglikelihood_world_religions diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml new file mode 100644 index 00000000..a4e54c9e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml @@ -0,0 +1,8 @@ +dataset_name: machine_learning +description: 'The following are multiple choice questions (with answers) about machine + learning. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_machine_learning diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml new file mode 100644 index 00000000..9c0c65b0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml @@ -0,0 +1,7 @@ +dataset_name: management +description: 'The following are multiple choice questions (with answers) about management. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_management diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml new file mode 100644 index 00000000..e2a74ca0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml @@ -0,0 +1,7 @@ +dataset_name: marketing +description: 'The following are multiple choice questions (with answers) about marketing. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_marketing diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml new file mode 100644 index 00000000..2c27958f --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml @@ -0,0 +1,8 @@ +dataset_name: medical_genetics +description: 'The following are multiple choice questions (with answers) about medical + genetics. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_medical_genetics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml new file mode 100644 index 00000000..389ca552 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml @@ -0,0 +1,7 @@ +dataset_name: miscellaneous +description: 'The following are multiple choice questions (with answers) about miscellaneous. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_miscellaneous diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml new file mode 100644 index 00000000..5f869327 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml @@ -0,0 +1,8 @@ +dataset_name: moral_disputes +description: 'The following are multiple choice questions (with answers) about moral + disputes. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_moral_disputes diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml new file mode 100644 index 00000000..ecc63596 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml @@ -0,0 +1,8 @@ +dataset_name: moral_scenarios +description: 'The following are multiple choice questions (with answers) about moral + scenarios. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_moral_scenarios diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml new file mode 100644 index 00000000..6d2da5cb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml @@ -0,0 +1,7 @@ +dataset_name: nutrition +description: 'The following are multiple choice questions (with answers) about nutrition. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_nutrition diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml new file mode 100644 index 00000000..421c50f9 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml @@ -0,0 +1,7 @@ +dataset_name: philosophy +description: 'The following are multiple choice questions (with answers) about philosophy. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_philosophy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml new file mode 100644 index 00000000..6e534911 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml @@ -0,0 +1,7 @@ +dataset_name: prehistory +description: 'The following are multiple choice questions (with answers) about prehistory. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_prehistory diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml new file mode 100644 index 00000000..93afd0fb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_accounting +description: 'The following are multiple choice questions (with answers) about professional + accounting. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_professional_accounting diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml new file mode 100644 index 00000000..d1e02680 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_law +description: 'The following are multiple choice questions (with answers) about professional + law. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_professional_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml new file mode 100644 index 00000000..2e39c273 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_medicine +description: 'The following are multiple choice questions (with answers) about professional + medicine. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_professional_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml new file mode 100644 index 00000000..2de37e23 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml @@ -0,0 +1,8 @@ +dataset_name: professional_psychology +description: 'The following are multiple choice questions (with answers) about professional + psychology. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_professional_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml new file mode 100644 index 00000000..d87a9a0a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml @@ -0,0 +1,8 @@ +dataset_name: public_relations +description: 'The following are multiple choice questions (with answers) about public + relations. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_public_relations diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml new file mode 100644 index 00000000..84c4fa9e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml @@ -0,0 +1,8 @@ +dataset_name: security_studies +description: 'The following are multiple choice questions (with answers) about security + studies. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_security_studies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml new file mode 100644 index 00000000..bca11a0a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml @@ -0,0 +1,7 @@ +dataset_name: sociology +description: 'The following are multiple choice questions (with answers) about sociology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_sociology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml new file mode 100644 index 00000000..4672df82 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml @@ -0,0 +1,8 @@ +dataset_name: us_foreign_policy +description: 'The following are multiple choice questions (with answers) about us + foreign policy. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_us_foreign_policy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml new file mode 100644 index 00000000..6f6d1680 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml @@ -0,0 +1,7 @@ +dataset_name: virology +description: 'The following are multiple choice questions (with answers) about virology. + + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_virology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml new file mode 100644 index 00000000..e53b98c8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml @@ -0,0 +1,8 @@ +dataset_name: world_religions +description: 'The following are multiple choice questions (with answers) about world + religions. 
+ + + ' +include: _mmlu_flan_generative_template_yaml +task: mmlu_flan_n_shot_world_religions -- GitLab From 06d3406e55d4fdc4d8f92061ff8143257aad87ed Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 04:58:40 +0000 Subject: [PATCH 100/264] update --- .../flan/prompt_templates/flan_anli.yaml | 36 ++--- .../flan/prompt_templates/flan_bbh.yaml | 29 ---- .../flan/yaml_templates/held_in_template_yaml | 2 +- lm_eval/benchmarks/flan_held_in.yaml | 20 +-- lm_eval/benchmarks/flan_held_out.yaml | 4 +- lm_eval/benchmarks/t0_eval.yaml | 145 ++++++++++-------- lm_eval/tasks/bbh/_generate_configs.py | 3 + lm_eval/tasks/bbh/_template_yaml | 6 +- lm_eval/tasks/bbh/boolean_expressions.yaml | 4 - lm_eval/tasks/bbh/causal_judgement.yaml | 4 - lm_eval/tasks/bbh/date_understanding.yaml | 4 - lm_eval/tasks/bbh/disambiguation_qa.yaml | 4 - lm_eval/tasks/bbh/dyck_languages.yaml | 4 - lm_eval/tasks/bbh/formal_fallacies.yaml | 4 - lm_eval/tasks/bbh/geometric_shapes.yaml | 4 - lm_eval/tasks/bbh/hyperbaton.yaml | 4 - .../bbh/logical_deduction_five_objects.yaml | 4 - .../bbh/logical_deduction_seven_objects.yaml | 4 - .../bbh/logical_deduction_three_objects.yaml | 4 - lm_eval/tasks/bbh/movie_recommendation.yaml | 4 - .../tasks/bbh/multistep_arithmetic_two.yaml | 4 - lm_eval/tasks/bbh/navigate.yaml | 4 - lm_eval/tasks/bbh/object_counting.yaml | 4 - lm_eval/tasks/bbh/penguins_in_a_table.yaml | 4 - .../bbh/reasoning_about_colored_objects.yaml | 4 - lm_eval/tasks/bbh/ruin_names.yaml | 4 - .../salient_translation_error_detection.yaml | 4 - lm_eval/tasks/bbh/snarks.yaml | 4 - lm_eval/tasks/bbh/sports_understanding.yaml | 4 - lm_eval/tasks/bbh/temporal_sequences.yaml | 4 - ...racking_shuffled_objects_five_objects.yaml | 4 - ...acking_shuffled_objects_seven_objects.yaml | 4 - ...acking_shuffled_objects_three_objects.yaml | 4 - lm_eval/tasks/bbh/web_of_lies.yaml | 4 - lm_eval/tasks/bbh/word_sorting.yaml | 4 - lm_eval/tasks/mmlu/_generate_configs.py | 2 +- .../_mmlu_flan_generative_template_yaml | 17 +- .../_mmlu_flan_loglikelihood_template_yaml | 10 +- .../flan_n_shot/mmlu_abstract_algebra.yaml | 3 +- .../flan_n_shot/mmlu_business_ethics.yaml | 3 +- .../flan_n_shot/mmlu_clinical_knowledge.yaml | 3 +- .../flan_n_shot/mmlu_college_biology.yaml | 3 +- .../flan_n_shot/mmlu_college_chemistry.yaml | 3 +- .../mmlu_college_computer_science.yaml | 3 +- .../flan_n_shot/mmlu_college_mathematics.yaml | 3 +- .../flan_n_shot/mmlu_college_medicine.yaml | 3 +- .../flan_n_shot/mmlu_college_physics.yaml | 3 +- .../flan_n_shot/mmlu_computer_security.yaml | 3 +- .../flan_n_shot/mmlu_conceptual_physics.yaml | 3 +- .../mmlu_electrical_engineering.yaml | 3 +- .../mmlu_elementary_mathematics.yaml | 3 +- .../mmlu/flan_n_shot/mmlu_formal_logic.yaml | 3 +- .../mmlu/flan_n_shot/mmlu_global_facts.yaml | 3 +- .../flan_n_shot/mmlu_high_school_biology.yaml | 3 +- .../mmlu_high_school_chemistry.yaml | 3 +- .../mmlu_high_school_computer_science.yaml | 3 +- .../mmlu_high_school_european_history.yaml | 3 +- .../mmlu_high_school_geography.yaml | 3 +- ...u_high_school_government_and_politics.yaml | 3 +- .../mmlu_high_school_macroeconomics.yaml | 3 +- .../mmlu_high_school_mathematics.yaml | 3 +- .../mmlu_high_school_microeconomics.yaml | 3 +- .../flan_n_shot/mmlu_high_school_physics.yaml | 3 +- .../mmlu_high_school_psychology.yaml | 3 +- .../mmlu_high_school_statistics.yaml | 3 +- .../mmlu_high_school_us_history.yaml | 3 +- .../mmlu_high_school_world_history.yaml | 3 +- .../mmlu/flan_n_shot/mmlu_human_aging.yaml | 3 +- 
.../flan_n_shot/mmlu_human_sexuality.yaml | 3 +- .../flan_n_shot/mmlu_international_law.yaml | 3 +- .../flan_n_shot/mmlu_logical_fallacies.yaml | 3 +- .../mmlu_loglikelihood_abstract_algebra.yaml | 3 +- .../mmlu_loglikelihood_business_ethics.yaml | 3 +- ...mmlu_loglikelihood_clinical_knowledge.yaml | 3 +- .../mmlu_loglikelihood_college_biology.yaml | 3 +- .../mmlu_loglikelihood_college_chemistry.yaml | 3 +- ...oglikelihood_college_computer_science.yaml | 3 +- ...mlu_loglikelihood_college_mathematics.yaml | 3 +- .../mmlu_loglikelihood_college_medicine.yaml | 3 +- .../mmlu_loglikelihood_college_physics.yaml | 3 +- .../mmlu_loglikelihood_computer_security.yaml | 3 +- ...mmlu_loglikelihood_conceptual_physics.yaml | 3 +- ..._loglikelihood_electrical_engineering.yaml | 3 +- ..._loglikelihood_elementary_mathematics.yaml | 3 +- .../mmlu_loglikelihood_formal_logic.yaml | 3 +- .../mmlu_loglikelihood_global_facts.yaml | 3 +- ...mlu_loglikelihood_high_school_biology.yaml | 3 +- ...u_loglikelihood_high_school_chemistry.yaml | 3 +- ...kelihood_high_school_computer_science.yaml | 3 +- ...kelihood_high_school_european_history.yaml | 3 +- ...u_loglikelihood_high_school_geography.yaml | 3 +- ...d_high_school_government_and_politics.yaml | 3 +- ...likelihood_high_school_macroeconomics.yaml | 3 +- ...loglikelihood_high_school_mathematics.yaml | 3 +- ...likelihood_high_school_microeconomics.yaml | 3 +- ...mlu_loglikelihood_high_school_physics.yaml | 3 +- ..._loglikelihood_high_school_psychology.yaml | 3 +- ..._loglikelihood_high_school_statistics.yaml | 3 +- ..._loglikelihood_high_school_us_history.yaml | 3 +- ...glikelihood_high_school_world_history.yaml | 3 +- .../mmlu_loglikelihood_human_aging.yaml | 3 +- .../mmlu_loglikelihood_human_sexuality.yaml | 3 +- .../mmlu_loglikelihood_international_law.yaml | 3 +- .../mmlu_loglikelihood_logical_fallacies.yaml | 3 +- .../mmlu_loglikelihood_machine_learning.yaml | 3 +- .../mmlu_loglikelihood_medical_genetics.yaml | 3 +- .../mmlu_loglikelihood_moral_disputes.yaml | 3 +- .../mmlu_loglikelihood_moral_scenarios.yaml | 3 +- ...loglikelihood_professional_accounting.yaml | 3 +- .../mmlu_loglikelihood_professional_law.yaml | 3 +- ...u_loglikelihood_professional_medicine.yaml | 3 +- ...loglikelihood_professional_psychology.yaml | 3 +- .../mmlu_loglikelihood_public_relations.yaml | 3 +- .../mmlu_loglikelihood_security_studies.yaml | 3 +- .../mmlu_loglikelihood_us_foreign_policy.yaml | 3 +- .../mmlu_loglikelihood_world_religions.yaml | 3 +- .../flan_n_shot/mmlu_machine_learning.yaml | 3 +- .../flan_n_shot/mmlu_medical_genetics.yaml | 3 +- .../mmlu/flan_n_shot/mmlu_moral_disputes.yaml | 3 +- .../flan_n_shot/mmlu_moral_scenarios.yaml | 3 +- .../mmlu_professional_accounting.yaml | 3 +- .../flan_n_shot/mmlu_professional_law.yaml | 3 +- .../mmlu_professional_medicine.yaml | 3 +- .../mmlu_professional_psychology.yaml | 3 +- .../flan_n_shot/mmlu_public_relations.yaml | 3 +- .../flan_n_shot/mmlu_security_studies.yaml | 3 +- .../flan_n_shot/mmlu_us_foreign_policy.yaml | 3 +- .../flan_n_shot/mmlu_world_religions.yaml | 3 +- lm_eval/tasks/super_glue/cb/t5_utils.py | 4 +- 129 files changed, 224 insertions(+), 432 deletions(-) delete mode 100644 lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml delete mode 100644 lm_eval/tasks/bbh/boolean_expressions.yaml delete mode 100644 lm_eval/tasks/bbh/causal_judgement.yaml delete mode 100644 lm_eval/tasks/bbh/date_understanding.yaml delete mode 100644 lm_eval/tasks/bbh/disambiguation_qa.yaml delete mode 100644 
lm_eval/tasks/bbh/dyck_languages.yaml delete mode 100644 lm_eval/tasks/bbh/formal_fallacies.yaml delete mode 100644 lm_eval/tasks/bbh/geometric_shapes.yaml delete mode 100644 lm_eval/tasks/bbh/hyperbaton.yaml delete mode 100644 lm_eval/tasks/bbh/logical_deduction_five_objects.yaml delete mode 100644 lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml delete mode 100644 lm_eval/tasks/bbh/logical_deduction_three_objects.yaml delete mode 100644 lm_eval/tasks/bbh/movie_recommendation.yaml delete mode 100644 lm_eval/tasks/bbh/multistep_arithmetic_two.yaml delete mode 100644 lm_eval/tasks/bbh/navigate.yaml delete mode 100644 lm_eval/tasks/bbh/object_counting.yaml delete mode 100644 lm_eval/tasks/bbh/penguins_in_a_table.yaml delete mode 100644 lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml delete mode 100644 lm_eval/tasks/bbh/ruin_names.yaml delete mode 100644 lm_eval/tasks/bbh/salient_translation_error_detection.yaml delete mode 100644 lm_eval/tasks/bbh/snarks.yaml delete mode 100644 lm_eval/tasks/bbh/sports_understanding.yaml delete mode 100644 lm_eval/tasks/bbh/temporal_sequences.yaml delete mode 100644 lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml delete mode 100644 lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml delete mode 100644 lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml delete mode 100644 lm_eval/tasks/bbh/web_of_lies.yaml delete mode 100644 lm_eval/tasks/bbh/word_sorting.yaml diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml index 7dae0ce0..9b9f6705 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml @@ -1,29 +1,29 @@ # Flan Prompt Templates prompts: "template-0": - doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" - doc_to_target: """{{["Yes", "It's impossible to say", "No"][label]}}""" + doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nI think the answer is" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-1": - doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-2": - doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-3": - doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "{{context}}\nDoes this next 
sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-4": - doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nThe answer is:" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-5": - doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-6": - doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-7": - doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "Can we draw the following hypothesis from the context (see options)? 
\n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" "template-8": - doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{["Yes", "It's impossible to say", "No"][label]}}" + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" + doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml deleted file mode 100644 index 525e9e0c..00000000 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_bbh.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Flan Prompt Templates -prompts: - "template-0": - doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-1": - doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-2": - doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-3": - doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-4": - doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-5": - doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-6": - doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-7": - doc_to_text: "Can we draw the following hypothesis from the context (see options)? 
\n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" - "template-8": - doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" diff --git a/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml b/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml index f28774c6..2f4a4c84 100644 --- a/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml +++ b/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml @@ -8,6 +8,6 @@ metric_list: ignore_punctuation: true generation_kwargs: until: - - "\n\n" + - "" do_sample: false temperature: 0.0 diff --git a/lm_eval/benchmarks/flan_held_in.yaml b/lm_eval/benchmarks/flan_held_in.yaml index a560bda8..f6d62f03 100644 --- a/lm_eval/benchmarks/flan_held_in.yaml +++ b/lm_eval/benchmarks/flan_held_in.yaml @@ -25,13 +25,13 @@ task: dataset_path: anli use_prompt: flan/prompt_templates/flan_anli.yaml:* validation_split: dev_r3 - # - include: flan/yaml_templates/held_in_template_yaml - # task: ai2_arc - # dataset_path: ARC-Easy - # use_prompt: local:* - # validation_split: validation - # - include: flan/yaml_templates/held_in_template_yaml - # task: ai2_arc - # dataset_path: ARC-Challange - # use_prompt: local:* - # validation_split: validation + - include: flan/yaml_templates/held_in_template_yaml + task: ai2_arc + dataset_path: ARC-Easy + use_prompt: local:* + validation_split: validation + - include: flan/yaml_templates/held_in_template_yaml + task: ai2_arc + dataset_path: ARC-Challenge + use_prompt: local:* + validation_split: validation diff --git a/lm_eval/benchmarks/flan_held_out.yaml b/lm_eval/benchmarks/flan_held_out.yaml index 4cd56468..cde82722 100644 --- a/lm_eval/benchmarks/flan_held_out.yaml +++ b/lm_eval/benchmarks/flan_held_out.yaml @@ -1,4 +1,4 @@ group: flan_held_out task: - - bbh - - mmlu + - bbh_flan + - mmlu_flan diff --git a/lm_eval/benchmarks/t0_eval.yaml b/lm_eval/benchmarks/t0_eval.yaml index 46c28d64..9cd25b51 100644 --- a/lm_eval/benchmarks/t0_eval.yaml +++ b/lm_eval/benchmarks/t0_eval.yaml @@ -6,6 +6,7 @@ task: use_prompt: promptsource:* training_split: train validation_split: validation + output_type: greedy_until metric_list: - metric: exact_match aggregation: mean @@ -18,18 +19,6 @@ task: use_prompt: promptsource:* training_split: train validation_split: validation - metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - ignore_punctuation: true - # Natural Language Inference - - dataset_path: super_glue - dataset_name: cb - use_prompt: promptsource:* - training_split: train - validation_split: validation output_type: greedy_until metric_list: - metric: exact_match @@ -37,67 +26,86 @@ higher_is_better: true ignore_case: true ignore_punctuation: true - - dataset_path: super_glue - dataset_name: rte - use_prompt: promptsource:* - training_split: train - validation_split: validation - metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - ignore_punctuation: true - - task: anli_r1 - dataset_path: anli - use_prompt: promptsource:* - training_split: train_r1 - validation_split: dev_r1 - metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - 
ignore_punctuation: true - - task: anli_r2 - dataset_path: anli - use_prompt: promptsource:* - training_split: train_r2 - validation_split: dev_r2 - metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - ignore_punctuation: true - - task: anli_r3 - dataset_path: anli - use_prompt: promptsource:* - training_split: train_r3 - validation_split: dev_r3 - metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - ignore_punctuation: true - # Sentence Completion - - dataset_path: super_glue - dataset_name: copa - use_prompt: promptsource:* - training_split: train - validation_split: validation - metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - ignore_punctuation: true + # # Natural Language Inference + # - dataset_path: super_glue + # dataset_name: cb + # use_prompt: promptsource:* + # training_split: train + # validation_split: validation + # output_type: greedy_until + # metric_list: + # - metric: exact_match + # aggregation: mean + # higher_is_better: true + # ignore_case: true + # ignore_punctuation: true + # - dataset_path: super_glue + # dataset_name: rte + # use_prompt: promptsource:* + # training_split: train + # validation_split: validation + # output_type: greedy_until + # metric_list: + # - metric: exact_match + # aggregation: mean + # higher_is_better: true + # ignore_case: true + # ignore_punctuation: true + # - task: anli_r1 + # dataset_path: anli + # use_prompt: promptsource:* + # training_split: train_r1 + # validation_split: dev_r1 + # output_type: greedy_until + # metric_list: + # - metric: exact_match + # aggregation: mean + # higher_is_better: true + # ignore_case: true + # ignore_punctuation: true + # - task: anli_r2 + # dataset_path: anli + # use_prompt: promptsource:* + # training_split: train_r2 + # validation_split: dev_r2 + # output_type: greedy_until + # metric_list: + # - metric: exact_match + # aggregation: mean + # higher_is_better: true + # ignore_case: true + # ignore_punctuation: true + # - task: anli_r3 + # dataset_path: anli + # use_prompt: promptsource:* + # training_split: train_r3 + # validation_split: dev_r3 + # output_type: greedy_until + # metric_list: + # - metric: exact_match + # aggregation: mean + # higher_is_better: true + # ignore_case: true + # ignore_punctuation: true + # # Sentence Completion + # - dataset_path: super_glue + # dataset_name: copa + # use_prompt: promptsource:* + # training_split: train + # validation_split: validation + # output_type: greedy_until + # metric_list: + # - metric: exact_match + # aggregation: mean + # higher_is_better: true + # ignore_case: true + # ignore_punctuation: true # Natural Language Inference - dataset_path: hellaswag use_prompt: promptsource:* training_split: train validation_split: validation + output_type: greedy_until metric_list: - metric: exact_match aggregation: mean @@ -110,6 +118,7 @@ task: use_prompt: promptsource:* training_split: train validation_split: validation + output_type: greedy_until metric_list: - metric: exact_match aggregation: mean diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py index 40e4c07d..9e603994 100644 --- a/lm_eval/tasks/bbh/_generate_configs.py +++ b/lm_eval/tasks/bbh/_generate_configs.py @@ -27,3 +27,6 @@ def main() -> None: if __name__ == "__main__": main() + + +# https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/boolean_expressions.txt diff --git 
a/lm_eval/tasks/bbh/_template_yaml b/lm_eval/tasks/bbh/_template_yaml index af6b74b3..3b174480 100644 --- a/lm_eval/tasks/bbh/_template_yaml +++ b/lm_eval/tasks/bbh/_template_yaml @@ -2,16 +2,14 @@ group: bbh dataset_path: lukaemon/bbh output_type: greedy_until test_split: test -doc_to_text: "{{input}}" +doc_to_text: "Q: {{input}}\nA:" doc_to_target: "{{target}}" metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: false generation_kwargs: until: - - "\n\n" + - "" do_sample: false temperature: 0.0 diff --git a/lm_eval/tasks/bbh/boolean_expressions.yaml b/lm_eval/tasks/bbh/boolean_expressions.yaml deleted file mode 100644 index d9895c81..00000000 --- a/lm_eval/tasks/bbh/boolean_expressions.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: boolean_expressions -include: _template_yaml -task: bbh_boolean_expressions diff --git a/lm_eval/tasks/bbh/causal_judgement.yaml b/lm_eval/tasks/bbh/causal_judgement.yaml deleted file mode 100644 index c3d48d53..00000000 --- a/lm_eval/tasks/bbh/causal_judgement.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: causal_judgement -include: _template_yaml -task: bbh_causal_judgement diff --git a/lm_eval/tasks/bbh/date_understanding.yaml b/lm_eval/tasks/bbh/date_understanding.yaml deleted file mode 100644 index 5f60efbe..00000000 --- a/lm_eval/tasks/bbh/date_understanding.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: date_understanding -include: _template_yaml -task: bbh_date_understanding diff --git a/lm_eval/tasks/bbh/disambiguation_qa.yaml b/lm_eval/tasks/bbh/disambiguation_qa.yaml deleted file mode 100644 index b043460e..00000000 --- a/lm_eval/tasks/bbh/disambiguation_qa.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: disambiguation_qa -include: _template_yaml -task: bbh_disambiguation_qa diff --git a/lm_eval/tasks/bbh/dyck_languages.yaml b/lm_eval/tasks/bbh/dyck_languages.yaml deleted file mode 100644 index 6b6648d0..00000000 --- a/lm_eval/tasks/bbh/dyck_languages.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: dyck_languages -include: _template_yaml -task: bbh_dyck_languages diff --git a/lm_eval/tasks/bbh/formal_fallacies.yaml b/lm_eval/tasks/bbh/formal_fallacies.yaml deleted file mode 100644 index 18d30c91..00000000 --- a/lm_eval/tasks/bbh/formal_fallacies.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: formal_fallacies -include: _template_yaml -task: bbh_formal_fallacies diff --git a/lm_eval/tasks/bbh/geometric_shapes.yaml b/lm_eval/tasks/bbh/geometric_shapes.yaml deleted file mode 100644 index 9616b6d6..00000000 --- a/lm_eval/tasks/bbh/geometric_shapes.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: geometric_shapes -include: _template_yaml -task: bbh_geometric_shapes diff --git a/lm_eval/tasks/bbh/hyperbaton.yaml b/lm_eval/tasks/bbh/hyperbaton.yaml deleted file mode 100644 index d1ff5bf8..00000000 --- a/lm_eval/tasks/bbh/hyperbaton.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: hyperbaton -include: _template_yaml -task: bbh_hyperbaton diff --git a/lm_eval/tasks/bbh/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/logical_deduction_five_objects.yaml deleted file mode 100644 index 91e6ec74..00000000 --- a/lm_eval/tasks/bbh/logical_deduction_five_objects.yaml +++ /dev/null @@ 
-1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: logical_deduction_five_objects -include: _template_yaml -task: bbh_logical_deduction_five_objects diff --git a/lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml deleted file mode 100644 index 342cf563..00000000 --- a/lm_eval/tasks/bbh/logical_deduction_seven_objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: logical_deduction_seven_objects -include: _template_yaml -task: bbh_logical_deduction_seven_objects diff --git a/lm_eval/tasks/bbh/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/logical_deduction_three_objects.yaml deleted file mode 100644 index 6669c6c8..00000000 --- a/lm_eval/tasks/bbh/logical_deduction_three_objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: logical_deduction_three_objects -include: _template_yaml -task: bbh_logical_deduction_three_objects diff --git a/lm_eval/tasks/bbh/movie_recommendation.yaml b/lm_eval/tasks/bbh/movie_recommendation.yaml deleted file mode 100644 index af884eec..00000000 --- a/lm_eval/tasks/bbh/movie_recommendation.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: movie_recommendation -include: _template_yaml -task: bbh_movie_recommendation diff --git a/lm_eval/tasks/bbh/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/multistep_arithmetic_two.yaml deleted file mode 100644 index 2ab191b8..00000000 --- a/lm_eval/tasks/bbh/multistep_arithmetic_two.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: multistep_arithmetic_two -include: _template_yaml -task: bbh_multistep_arithmetic_two diff --git a/lm_eval/tasks/bbh/navigate.yaml b/lm_eval/tasks/bbh/navigate.yaml deleted file mode 100644 index f737a9c5..00000000 --- a/lm_eval/tasks/bbh/navigate.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: navigate -include: _template_yaml -task: bbh_navigate diff --git a/lm_eval/tasks/bbh/object_counting.yaml b/lm_eval/tasks/bbh/object_counting.yaml deleted file mode 100644 index 606bd92d..00000000 --- a/lm_eval/tasks/bbh/object_counting.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: object_counting -include: _template_yaml -task: bbh_object_counting diff --git a/lm_eval/tasks/bbh/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/penguins_in_a_table.yaml deleted file mode 100644 index 25e183ce..00000000 --- a/lm_eval/tasks/bbh/penguins_in_a_table.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: penguins_in_a_table -include: _template_yaml -task: bbh_penguins_in_a_table diff --git a/lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml deleted file mode 100644 index 785e0b2e..00000000 --- a/lm_eval/tasks/bbh/reasoning_about_colored_objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: reasoning_about_colored_objects -include: _template_yaml -task: bbh_reasoning_about_colored_objects diff --git a/lm_eval/tasks/bbh/ruin_names.yaml b/lm_eval/tasks/bbh/ruin_names.yaml deleted file mode 100644 index aef28b1c..00000000 --- a/lm_eval/tasks/bbh/ruin_names.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: ruin_names -include: _template_yaml -task: bbh_ruin_names diff --git a/lm_eval/tasks/bbh/salient_translation_error_detection.yaml 
b/lm_eval/tasks/bbh/salient_translation_error_detection.yaml deleted file mode 100644 index 433867fe..00000000 --- a/lm_eval/tasks/bbh/salient_translation_error_detection.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: salient_translation_error_detection -include: _template_yaml -task: bbh_salient_translation_error_detection diff --git a/lm_eval/tasks/bbh/snarks.yaml b/lm_eval/tasks/bbh/snarks.yaml deleted file mode 100644 index 49f57d20..00000000 --- a/lm_eval/tasks/bbh/snarks.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: snarks -include: _template_yaml -task: bbh_snarks diff --git a/lm_eval/tasks/bbh/sports_understanding.yaml b/lm_eval/tasks/bbh/sports_understanding.yaml deleted file mode 100644 index cf84b1e1..00000000 --- a/lm_eval/tasks/bbh/sports_understanding.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: sports_understanding -include: _template_yaml -task: bbh_sports_understanding diff --git a/lm_eval/tasks/bbh/temporal_sequences.yaml b/lm_eval/tasks/bbh/temporal_sequences.yaml deleted file mode 100644 index b3f5c0af..00000000 --- a/lm_eval/tasks/bbh/temporal_sequences.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: temporal_sequences -include: _template_yaml -task: bbh_temporal_sequences diff --git a/lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml deleted file mode 100644 index d4ca2fe0..00000000 --- a/lm_eval/tasks/bbh/tracking_shuffled_objects_five_objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: tracking_shuffled_objects_five_objects -include: _template_yaml -task: bbh_tracking_shuffled_objects_five_objects diff --git a/lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml deleted file mode 100644 index 20fff820..00000000 --- a/lm_eval/tasks/bbh/tracking_shuffled_objects_seven_objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: tracking_shuffled_objects_seven_objects -include: _template_yaml -task: bbh_tracking_shuffled_objects_seven_objects diff --git a/lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml deleted file mode 100644 index f219d30c..00000000 --- a/lm_eval/tasks/bbh/tracking_shuffled_objects_three_objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: tracking_shuffled_objects_three_objects -include: _template_yaml -task: bbh_tracking_shuffled_objects_three_objects diff --git a/lm_eval/tasks/bbh/web_of_lies.yaml b/lm_eval/tasks/bbh/web_of_lies.yaml deleted file mode 100644 index 18dcb591..00000000 --- a/lm_eval/tasks/bbh/web_of_lies.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: web_of_lies -include: _template_yaml -task: bbh_web_of_lies diff --git a/lm_eval/tasks/bbh/word_sorting.yaml b/lm_eval/tasks/bbh/word_sorting.yaml deleted file mode 100644 index 11725e0a..00000000 --- a/lm_eval/tasks/bbh/word_sorting.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Generated by _generate_configs.py -dataset_name: word_sorting -include: _template_yaml -task: bbh_word_sorting diff --git a/lm_eval/tasks/mmlu/_generate_configs.py b/lm_eval/tasks/mmlu/_generate_configs.py index db0e20cd..af9bd0c6 100644 --- 
a/lm_eval/tasks/mmlu/_generate_configs.py +++ b/lm_eval/tasks/mmlu/_generate_configs.py @@ -115,4 +115,4 @@ if __name__ == "__main__": file_save_path = args.save_prefix_path + f"_{subject}.yaml" eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") with open(file_save_path, "w") as yaml_file: - yaml.dump(yaml_dict, yaml_file) + yaml.dump(yaml_dict, yaml_file, width=float("inf")) diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml index b369024c..3f649666 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml @@ -1,18 +1,21 @@ group: mmlu_flan dataset_path: cais/mmlu -validation_split: validation +# validation_split: validation +test_split: test fewshot_split: dev -doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA:" +# doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: " +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" output_type: greedy_until -doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" +# doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" +doc_to_target: "{{['A', 'B', 'C', 'D'][answer]}}" metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true + # ignore_case: true + # ignore_punctuation: true generation_kwargs: until: - "" - do_sample: false - temperature: 0.0 \ No newline at end of file +# do_sample: false +# temperature: 0.0 \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml index eb38e0fe..2a09f787 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml @@ -1,12 +1,16 @@ group: mmlu_flan_loglikelihood dataset_path: cais/mmlu -validation_split: validation +# validation_split: validation +test_split: test fewshot_split: dev -doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA:" output_type: multiple_choice -doc_to_choice: ['(A)', '(B)', '(C)', '(D)'] +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] doc_to_target: answer metric_list: - metric: acc aggregation: mean higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml index 9fca2117..31729f37 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml @@ -1,6 +1,5 @@ dataset_name: abstract_algebra -description: 'The following are multiple choice questions (with answers) about abstract - algebra. +description: 'The following are multiple choice questions (with answers) about abstract algebra. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml index f75a48fd..d1dcf3c7 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml @@ -1,6 +1,5 @@ dataset_name: business_ethics -description: 'The following are multiple choice questions (with answers) about business - ethics. +description: 'The following are multiple choice questions (with answers) about business ethics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml index 07a3fe79..14b12359 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml @@ -1,6 +1,5 @@ dataset_name: clinical_knowledge -description: 'The following are multiple choice questions (with answers) about clinical - knowledge. +description: 'The following are multiple choice questions (with answers) about clinical knowledge. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml index 7465f0d3..0d202b8e 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml @@ -1,6 +1,5 @@ dataset_name: college_biology -description: 'The following are multiple choice questions (with answers) about college - biology. +description: 'The following are multiple choice questions (with answers) about college biology. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml index 17e7fbde..77f6328f 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml @@ -1,6 +1,5 @@ dataset_name: college_chemistry -description: 'The following are multiple choice questions (with answers) about college - chemistry. +description: 'The following are multiple choice questions (with answers) about college chemistry. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml index d0032874..f5cbda28 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml @@ -1,6 +1,5 @@ dataset_name: college_computer_science -description: 'The following are multiple choice questions (with answers) about college - computer science. +description: 'The following are multiple choice questions (with answers) about college computer science. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml index be1e01b2..dbc9be4c 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml @@ -1,6 +1,5 @@ dataset_name: college_mathematics -description: 'The following are multiple choice questions (with answers) about college - mathematics. +description: 'The following are multiple choice questions (with answers) about college mathematics. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml index 4c8aa79e..efc868f0 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml @@ -1,6 +1,5 @@ dataset_name: college_medicine -description: 'The following are multiple choice questions (with answers) about college - medicine. +description: 'The following are multiple choice questions (with answers) about college medicine. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml index cd07980b..d92c14ea 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml @@ -1,6 +1,5 @@ dataset_name: college_physics -description: 'The following are multiple choice questions (with answers) about college - physics. +description: 'The following are multiple choice questions (with answers) about college physics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml index 93dc9040..3ddf3ee5 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml @@ -1,6 +1,5 @@ dataset_name: computer_security -description: 'The following are multiple choice questions (with answers) about computer - security. +description: 'The following are multiple choice questions (with answers) about computer security. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml index 2f313298..7c4f90ed 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml @@ -1,6 +1,5 @@ dataset_name: conceptual_physics -description: 'The following are multiple choice questions (with answers) about conceptual - physics. +description: 'The following are multiple choice questions (with answers) about conceptual physics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml index 5d1ccca1..0308fe16 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml @@ -1,6 +1,5 @@ dataset_name: electrical_engineering -description: 'The following are multiple choice questions (with answers) about electrical - engineering. +description: 'The following are multiple choice questions (with answers) about electrical engineering. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml index 7260b752..2b8a8caf 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml @@ -1,6 +1,5 @@ dataset_name: elementary_mathematics -description: 'The following are multiple choice questions (with answers) about elementary - mathematics. +description: 'The following are multiple choice questions (with answers) about elementary mathematics. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml index 7dd42af5..10f58f41 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml @@ -1,6 +1,5 @@ dataset_name: formal_logic -description: 'The following are multiple choice questions (with answers) about formal - logic. +description: 'The following are multiple choice questions (with answers) about formal logic. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml index 373f99fb..48816fe3 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml @@ -1,6 +1,5 @@ dataset_name: global_facts -description: 'The following are multiple choice questions (with answers) about global - facts. +description: 'The following are multiple choice questions (with answers) about global facts. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml index 334286dc..ebb1ded2 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_biology -description: 'The following are multiple choice questions (with answers) about high - school biology. +description: 'The following are multiple choice questions (with answers) about high school biology. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml index f4d3bcfb..66a484a3 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_chemistry -description: 'The following are multiple choice questions (with answers) about high - school chemistry. +description: 'The following are multiple choice questions (with answers) about high school chemistry. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml index 03e0411b..b9a9060c 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_computer_science -description: 'The following are multiple choice questions (with answers) about high - school computer science. +description: 'The following are multiple choice questions (with answers) about high school computer science. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml index a7a73a24..f89cca29 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_european_history -description: 'The following are multiple choice questions (with answers) about high - school european history. +description: 'The following are multiple choice questions (with answers) about high school european history. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml index d7b29960..f255d37a 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_geography -description: 'The following are multiple choice questions (with answers) about high - school geography. +description: 'The following are multiple choice questions (with answers) about high school geography. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml index dbb195e2..108aebf8 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_government_and_politics -description: 'The following are multiple choice questions (with answers) about high - school government and politics. +description: 'The following are multiple choice questions (with answers) about high school government and politics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml index a8e6c4cd..720baeac 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_macroeconomics -description: 'The following are multiple choice questions (with answers) about high - school macroeconomics. +description: 'The following are multiple choice questions (with answers) about high school macroeconomics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml index 35adc8b7..fbad67d6 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_mathematics -description: 'The following are multiple choice questions (with answers) about high - school mathematics. +description: 'The following are multiple choice questions (with answers) about high school mathematics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml index a0887261..4b4e85bd 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_microeconomics -description: 'The following are multiple choice questions (with answers) about high - school microeconomics. +description: 'The following are multiple choice questions (with answers) about high school microeconomics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml index 63a77a7d..941d6c22 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_physics -description: 'The following are multiple choice questions (with answers) about high - school physics. 
+description: 'The following are multiple choice questions (with answers) about high school physics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml index 7d738494..831907f0 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_psychology -description: 'The following are multiple choice questions (with answers) about high - school psychology. +description: 'The following are multiple choice questions (with answers) about high school psychology. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml index 6b3e4b5a..255c7394 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_statistics -description: 'The following are multiple choice questions (with answers) about high - school statistics. +description: 'The following are multiple choice questions (with answers) about high school statistics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml index e80b64e1..4ea76cd3 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_us_history -description: 'The following are multiple choice questions (with answers) about high - school us history. +description: 'The following are multiple choice questions (with answers) about high school us history. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml index 83d55ae6..26551e82 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_world_history -description: 'The following are multiple choice questions (with answers) about high - school world history. +description: 'The following are multiple choice questions (with answers) about high school world history. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml index 2ff416b8..042e81cf 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml @@ -1,6 +1,5 @@ dataset_name: human_aging -description: 'The following are multiple choice questions (with answers) about human - aging. +description: 'The following are multiple choice questions (with answers) about human aging. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml index 2d7316ac..d2d55b70 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml @@ -1,6 +1,5 @@ dataset_name: human_sexuality -description: 'The following are multiple choice questions (with answers) about human - sexuality. +description: 'The following are multiple choice questions (with answers) about human sexuality. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml index 547cb3c0..12b18807 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml @@ -1,6 +1,5 @@ dataset_name: international_law -description: 'The following are multiple choice questions (with answers) about international - law. +description: 'The following are multiple choice questions (with answers) about international law. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml index 1b8a4d2b..aaaef665 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml @@ -1,6 +1,5 @@ dataset_name: logical_fallacies -description: 'The following are multiple choice questions (with answers) about logical - fallacies. +description: 'The following are multiple choice questions (with answers) about logical fallacies. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml index 60973953..19f6d3ee 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml @@ -1,6 +1,5 @@ dataset_name: abstract_algebra -description: 'The following are multiple choice questions (with answers) about abstract - algebra. +description: 'The following are multiple choice questions (with answers) about abstract algebra. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml index 3f2bcc77..b6917938 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml @@ -1,6 +1,5 @@ dataset_name: business_ethics -description: 'The following are multiple choice questions (with answers) about business - ethics. +description: 'The following are multiple choice questions (with answers) about business ethics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml index 780c8bf6..bc19e7c5 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml @@ -1,6 +1,5 @@ dataset_name: clinical_knowledge -description: 'The following are multiple choice questions (with answers) about clinical - knowledge. +description: 'The following are multiple choice questions (with answers) about clinical knowledge. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml index 7d270f47..defc3d98 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml @@ -1,6 +1,5 @@ dataset_name: college_biology -description: 'The following are multiple choice questions (with answers) about college - biology. +description: 'The following are multiple choice questions (with answers) about college biology. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml index e947d1a2..15a2b3cc 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml @@ -1,6 +1,5 @@ dataset_name: college_chemistry -description: 'The following are multiple choice questions (with answers) about college - chemistry. +description: 'The following are multiple choice questions (with answers) about college chemistry. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml index a23b0bd3..ff69b70e 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml @@ -1,6 +1,5 @@ dataset_name: college_computer_science -description: 'The following are multiple choice questions (with answers) about college - computer science. +description: 'The following are multiple choice questions (with answers) about college computer science. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml index c03033eb..fb67c2ee 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml @@ -1,6 +1,5 @@ dataset_name: college_mathematics -description: 'The following are multiple choice questions (with answers) about college - mathematics. +description: 'The following are multiple choice questions (with answers) about college mathematics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml index 64f952bb..6edac775 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml @@ -1,6 +1,5 @@ dataset_name: college_medicine -description: 'The following are multiple choice questions (with answers) about college - medicine. +description: 'The following are multiple choice questions (with answers) about college medicine. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml index f339c316..2af2929f 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml @@ -1,6 +1,5 @@ dataset_name: college_physics -description: 'The following are multiple choice questions (with answers) about college - physics. +description: 'The following are multiple choice questions (with answers) about college physics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml index cc28f843..fe239463 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml @@ -1,6 +1,5 @@ dataset_name: computer_security -description: 'The following are multiple choice questions (with answers) about computer - security. 
+description: 'The following are multiple choice questions (with answers) about computer security. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml index dc3c9096..a593cb97 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml @@ -1,6 +1,5 @@ dataset_name: conceptual_physics -description: 'The following are multiple choice questions (with answers) about conceptual - physics. +description: 'The following are multiple choice questions (with answers) about conceptual physics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml index 20823b42..06e8e3b0 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml @@ -1,6 +1,5 @@ dataset_name: electrical_engineering -description: 'The following are multiple choice questions (with answers) about electrical - engineering. +description: 'The following are multiple choice questions (with answers) about electrical engineering. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml index afed59aa..ea151100 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml @@ -1,6 +1,5 @@ dataset_name: elementary_mathematics -description: 'The following are multiple choice questions (with answers) about elementary - mathematics. +description: 'The following are multiple choice questions (with answers) about elementary mathematics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml index 2a2359f5..2a2299fb 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml @@ -1,6 +1,5 @@ dataset_name: formal_logic -description: 'The following are multiple choice questions (with answers) about formal - logic. +description: 'The following are multiple choice questions (with answers) about formal logic. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml index 4d23b227..b3c003f3 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml @@ -1,6 +1,5 @@ dataset_name: global_facts -description: 'The following are multiple choice questions (with answers) about global - facts. +description: 'The following are multiple choice questions (with answers) about global facts. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml index a9bdefee..6a28adbd 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_biology -description: 'The following are multiple choice questions (with answers) about high - school biology. +description: 'The following are multiple choice questions (with answers) about high school biology. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml index ec512f42..dedab0ad 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_chemistry -description: 'The following are multiple choice questions (with answers) about high - school chemistry. +description: 'The following are multiple choice questions (with answers) about high school chemistry. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml index 67d70ec6..8c096fd8 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_computer_science -description: 'The following are multiple choice questions (with answers) about high - school computer science. +description: 'The following are multiple choice questions (with answers) about high school computer science. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml index 62c6013b..8b2a2705 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_european_history -description: 'The following are multiple choice questions (with answers) about high - school european history. +description: 'The following are multiple choice questions (with answers) about high school european history. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml index a4b6d856..32bcc3e1 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_geography -description: 'The following are multiple choice questions (with answers) about high - school geography. +description: 'The following are multiple choice questions (with answers) about high school geography. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml index f7c2cb8c..191bc63b 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_government_and_politics -description: 'The following are multiple choice questions (with answers) about high - school government and politics. +description: 'The following are multiple choice questions (with answers) about high school government and politics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml index b623360e..838ffed9 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_macroeconomics -description: 'The following are multiple choice questions (with answers) about high - school macroeconomics. +description: 'The following are multiple choice questions (with answers) about high school macroeconomics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml index b2ddfc0e..246d8988 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_mathematics -description: 'The following are multiple choice questions (with answers) about high - school mathematics. +description: 'The following are multiple choice questions (with answers) about high school mathematics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml index e8bff48d..1fea1850 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_microeconomics -description: 'The following are multiple choice questions (with answers) about high - school microeconomics. +description: 'The following are multiple choice questions (with answers) about high school microeconomics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml index 2b97ac1d..6aa802e8 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_physics -description: 'The following are multiple choice questions (with answers) about high - school physics. +description: 'The following are multiple choice questions (with answers) about high school physics. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml index fecb5f70..521b3e54 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_psychology -description: 'The following are multiple choice questions (with answers) about high - school psychology. +description: 'The following are multiple choice questions (with answers) about high school psychology. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml index 4ff766db..3cd82472 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_statistics -description: 'The following are multiple choice questions (with answers) about high - school statistics. +description: 'The following are multiple choice questions (with answers) about high school statistics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml index f725e916..34a7d05d 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_us_history -description: 'The following are multiple choice questions (with answers) about high - school us history. +description: 'The following are multiple choice questions (with answers) about high school us history. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml index 0142ce33..b6390aa3 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml @@ -1,6 +1,5 @@ dataset_name: high_school_world_history -description: 'The following are multiple choice questions (with answers) about high - school world history. +description: 'The following are multiple choice questions (with answers) about high school world history. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml index 4b007ca1..bf454427 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml @@ -1,6 +1,5 @@ dataset_name: human_aging -description: 'The following are multiple choice questions (with answers) about human - aging. +description: 'The following are multiple choice questions (with answers) about human aging. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml index 37d5e42a..7bec1aa9 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml @@ -1,6 +1,5 @@ dataset_name: human_sexuality -description: 'The following are multiple choice questions (with answers) about human - sexuality. +description: 'The following are multiple choice questions (with answers) about human sexuality. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml index 03987fdf..6d56237f 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml @@ -1,6 +1,5 @@ dataset_name: international_law -description: 'The following are multiple choice questions (with answers) about international - law. +description: 'The following are multiple choice questions (with answers) about international law. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml index 3b8b7b98..bc2b1b41 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml @@ -1,6 +1,5 @@ dataset_name: logical_fallacies -description: 'The following are multiple choice questions (with answers) about logical - fallacies. +description: 'The following are multiple choice questions (with answers) about logical fallacies. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml index 473a2bc9..3688fd28 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml @@ -1,6 +1,5 @@ dataset_name: machine_learning -description: 'The following are multiple choice questions (with answers) about machine - learning. +description: 'The following are multiple choice questions (with answers) about machine learning. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml index ef221495..68bd9c12 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml @@ -1,6 +1,5 @@ dataset_name: medical_genetics -description: 'The following are multiple choice questions (with answers) about medical - genetics. +description: 'The following are multiple choice questions (with answers) about medical genetics. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml index 36ca7f98..c55d44bd 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml @@ -1,6 +1,5 @@ dataset_name: moral_disputes -description: 'The following are multiple choice questions (with answers) about moral - disputes. 
+description: 'The following are multiple choice questions (with answers) about moral disputes. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml index 5415d5b4..99bcb848 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml @@ -1,6 +1,5 @@ dataset_name: moral_scenarios -description: 'The following are multiple choice questions (with answers) about moral - scenarios. +description: 'The following are multiple choice questions (with answers) about moral scenarios. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml index 98fa6bd8..2f64beae 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml @@ -1,6 +1,5 @@ dataset_name: professional_accounting -description: 'The following are multiple choice questions (with answers) about professional - accounting. +description: 'The following are multiple choice questions (with answers) about professional accounting. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml index aada41a7..de0e8392 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml @@ -1,6 +1,5 @@ dataset_name: professional_law -description: 'The following are multiple choice questions (with answers) about professional - law. +description: 'The following are multiple choice questions (with answers) about professional law. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml index 3febeb67..ef4d0c07 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml @@ -1,6 +1,5 @@ dataset_name: professional_medicine -description: 'The following are multiple choice questions (with answers) about professional - medicine. +description: 'The following are multiple choice questions (with answers) about professional medicine. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml index 33b77f62..9bb12be0 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml @@ -1,6 +1,5 @@ dataset_name: professional_psychology -description: 'The following are multiple choice questions (with answers) about professional - psychology. +description: 'The following are multiple choice questions (with answers) about professional psychology. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml index dd7f9976..e9a761cc 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml @@ -1,6 +1,5 @@ dataset_name: public_relations -description: 'The following are multiple choice questions (with answers) about public - relations. +description: 'The following are multiple choice questions (with answers) about public relations. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml index cc236bd4..6a141ba6 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml @@ -1,6 +1,5 @@ dataset_name: security_studies -description: 'The following are multiple choice questions (with answers) about security - studies. +description: 'The following are multiple choice questions (with answers) about security studies. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml index 42b51e5e..7aa77456 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml @@ -1,6 +1,5 @@ dataset_name: us_foreign_policy -description: 'The following are multiple choice questions (with answers) about us - foreign policy. +description: 'The following are multiple choice questions (with answers) about us foreign policy. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml index 1525efe6..6f26fda1 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml @@ -1,6 +1,5 @@ dataset_name: world_religions -description: 'The following are multiple choice questions (with answers) about world - religions. +description: 'The following are multiple choice questions (with answers) about world religions. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml index a4e54c9e..c97c9f09 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml @@ -1,6 +1,5 @@ dataset_name: machine_learning -description: 'The following are multiple choice questions (with answers) about machine - learning. +description: 'The following are multiple choice questions (with answers) about machine learning. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml index 2c27958f..0464b15c 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml @@ -1,6 +1,5 @@ dataset_name: medical_genetics -description: 'The following are multiple choice questions (with answers) about medical - genetics. +description: 'The following are multiple choice questions (with answers) about medical genetics. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml index 5f869327..671ca84e 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml @@ -1,6 +1,5 @@ dataset_name: moral_disputes -description: 'The following are multiple choice questions (with answers) about moral - disputes. +description: 'The following are multiple choice questions (with answers) about moral disputes. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml index ecc63596..1ecbff40 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml @@ -1,6 +1,5 @@ dataset_name: moral_scenarios -description: 'The following are multiple choice questions (with answers) about moral - scenarios. +description: 'The following are multiple choice questions (with answers) about moral scenarios. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml index 93afd0fb..93b4e4d3 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml @@ -1,6 +1,5 @@ dataset_name: professional_accounting -description: 'The following are multiple choice questions (with answers) about professional - accounting. +description: 'The following are multiple choice questions (with answers) about professional accounting. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml index d1e02680..a8704652 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml @@ -1,6 +1,5 @@ dataset_name: professional_law -description: 'The following are multiple choice questions (with answers) about professional - law. +description: 'The following are multiple choice questions (with answers) about professional law. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml index 2e39c273..137a39d5 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml @@ -1,6 +1,5 @@ dataset_name: professional_medicine -description: 'The following are multiple choice questions (with answers) about professional - medicine. +description: 'The following are multiple choice questions (with answers) about professional medicine. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml index 2de37e23..342031f7 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml @@ -1,6 +1,5 @@ dataset_name: professional_psychology -description: 'The following are multiple choice questions (with answers) about professional - psychology. +description: 'The following are multiple choice questions (with answers) about professional psychology. 
' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml index d87a9a0a..88ffe1b8 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml @@ -1,6 +1,5 @@ dataset_name: public_relations -description: 'The following are multiple choice questions (with answers) about public - relations. +description: 'The following are multiple choice questions (with answers) about public relations. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml index 84c4fa9e..b56c6803 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml @@ -1,6 +1,5 @@ dataset_name: security_studies -description: 'The following are multiple choice questions (with answers) about security - studies. +description: 'The following are multiple choice questions (with answers) about security studies. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml index 4672df82..797fc9b7 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml @@ -1,6 +1,5 @@ dataset_name: us_foreign_policy -description: 'The following are multiple choice questions (with answers) about us - foreign policy. +description: 'The following are multiple choice questions (with answers) about us foreign policy. ' diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml index e53b98c8..b0253c46 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml @@ -1,6 +1,5 @@ dataset_name: world_religions -description: 'The following are multiple choice questions (with answers) about world - religions. +description: 'The following are multiple choice questions (with answers) about world religions. 
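The description rewrites in the hunks above are cosmetic from YAML's point of view: inside a single-quoted scalar, a line break plus the following indentation folds to a single space, so the wrapped two-line form and the flattened one-line form load to the identical string. A minimal sketch of that equivalence (PyYAML assumed available; the strings are shortened for illustration):

    import yaml

    # Wrapped form, as yaml.dump's default line width produces it.
    wrapped = "description: 'questions about college\n  medicine.'"
    # Flattened form, as written by this patch.
    flat = "description: 'questions about college medicine.'"

    # The line break inside a single-quoted scalar folds to one space.
    assert yaml.safe_load(wrapped) == yaml.safe_load(flat)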
' diff --git a/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm_eval/tasks/super_glue/cb/t5_utils.py index 644c2111..43eafce9 100644 --- a/lm_eval/tasks/super_glue/cb/t5_utils.py +++ b/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -4,7 +4,9 @@ import sklearn.metrics def mean_3class_f1(predictions, references): # This is a passthrough function string_label = ["entailment", "contradiction", "neutral"] - predictions = string_label.index(predictions[0]) if predictions[0] in string_label else 0 + predictions = ( + string_label.index(predictions[0]) if predictions[0] in string_label else 0 + ) references = string_label.index(references[0]) return (predictions, references) -- GitLab From 13940f1ecd39377a25f86973c20e47c502271d31 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:39:50 +0000 Subject: [PATCH 101/264] add bbh flan_cot_fewshot --- .../_flan_cot_fewshot_template_yaml | 22 +++++++++++++++++++ .../flan_cot_fewshot/boolean_expressions.yaml | 5 +++++ .../flan_cot_fewshot/causal_judgement.yaml | 5 +++++ .../flan_cot_fewshot/date_understanding.yaml | 5 +++++ .../flan_cot_fewshot/disambiguation_qa.yaml | 5 +++++ .../bbh/flan_cot_fewshot/dyck_languages.yaml | 5 +++++ .../flan_cot_fewshot/formal_fallacies.yaml | 5 +++++ .../flan_cot_fewshot/geometric_shapes.yaml | 5 +++++ .../bbh/flan_cot_fewshot/hyperbaton.yaml | 5 +++++ .../logical_deduction_five_objects.yaml | 5 +++++ .../logical_deduction_seven_objects.yaml | 5 +++++ .../logical_deduction_three_objects.yaml | 5 +++++ .../movie_recommendation.yaml | 5 +++++ .../multistep_arithmetic_two.yaml | 5 +++++ .../tasks/bbh/flan_cot_fewshot/navigate.yaml | 5 +++++ .../bbh/flan_cot_fewshot/object_counting.yaml | 5 +++++ .../flan_cot_fewshot/penguins_in_a_table.yaml | 5 +++++ .../reasoning_about_colored_objects.yaml | 5 +++++ .../bbh/flan_cot_fewshot/ruin_names.yaml | 5 +++++ .../salient_translation_error_detection.yaml | 5 +++++ .../tasks/bbh/flan_cot_fewshot/snarks.yaml | 5 +++++ .../sports_understanding.yaml | 5 +++++ .../flan_cot_fewshot/temporal_sequences.yaml | 5 +++++ ...racking_shuffled_objects_five_objects.yaml | 5 +++++ ...acking_shuffled_objects_seven_objects.yaml | 5 +++++ ...acking_shuffled_objects_three_objects.yaml | 5 +++++ .../bbh/flan_cot_fewshot/web_of_lies.yaml | 5 +++++ .../bbh/flan_cot_fewshot/word_sorting.yaml | 5 +++++ 28 files changed, 157 insertions(+) create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml create mode 100644 
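The mean_3class_f1 change above is formatting only (Black-style wrapping of the ternary); the function still just maps the label strings to class indices and passes the (prediction, reference) pair through, with unrecognized predictions falling back to index 0. The macro-averaged F1 is then computed over all collected pairs at aggregation time, roughly as in this sketch (sklearn assumed; the aggregation function name here is illustrative, not the harness's actual hook):

    import sklearn.metrics

    def aggregate_mean_3class_f1(items):
        # items: list of (prediction_index, reference_index) pairs
        # produced by the per-example passthrough above.
        predictions, references = zip(*items)
        return sklearn.metrics.f1_score(references, predictions, average="macro")

    # One correct prediction per class yields a perfect macro F1.
    assert aggregate_mean_3class_f1([(0, 0), (1, 1), (2, 2)]) == 1.0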
lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml new file mode 100644 index 00000000..d9dbf8c5 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml @@ -0,0 +1,22 @@ +group: bbh_flan_fewshot +dataset_path: lukaemon/bbh +output_type: greedy_until +test_split: test +doc_to_target: "{{target}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "</s>" + do_sample: false + temperature: 0.0 +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml new file mode 100644 index 00000000..849e0435 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml @@ -0,0 +1,5 @@ +"dataset_name": "boolean_expressions" +"description": "Evaluate the result of a random Boolean expression.\n\n" +"doc_to_text": "Q: not ( ( not not True ) ) is\nA: Let's think step by step.\nRemember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = not ( ( not not True ) ) = not ( ( A ) )\" where \"A = not not True\".\nLet's evaluate A: A = not not True = not (not True) = not False = True.\nPlugging in A, we get: Z = not ( ( A ) ) = not ( ( True ) ) = not True = False. So the answer is False.\n\nQ: True and False and not True and True is\nA: Let's think step by step.\nRemember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = True and False and not True and True = A and B\" where \"A = True and False\" and \"B = not True and True\".\nLet's evaluate A: A = True and False = False.\nLet's evaluate B: B = not True and True = not (True and True) = not (True) = False.\nPlugging in A and B, we get: Z = A and B = False and False = False. So the answer is False.
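The filter_list in the template above post-processes each generation before scoring: the look-behind regex captures what follows "The answer is ", and take_first keeps the first match. A rough standalone equivalent of that step (plain re, not the harness's filter classes; the fallback behavior here is an assumption):

    import re

    # Same pattern as regex_pattern in the template above. Note that it is
    # case-sensitive, and that the (?=.) lookahead forces the greedy (.*)
    # to stop one character short of the end of the line, trimming e.g. a
    # trailing period.
    ANSWER_RE = re.compile(r"(?<=The answer is )(.*)(?=.)")

    def extract_answer(generation: str) -> str:
        match = ANSWER_RE.search(generation)
        return match.group(1) if match else generation  # fallback: raw text

    assert extract_answer("Plugging in A, we get False. The answer is False.") == "False"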
\n\nQ: not not ( not ( False ) ) is\nA: Let's think step by step.\nRemember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = not not ( not ( False ) ) = not not ( A )\" where \"A = not ( False )\".\nLet's evaluate A: A = not ( False ) = not False = True.\nPlugging in A, we get: Z = not not ( A ) = not not (True) = not not False = True. So the answer is True.\n\nQ: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_boolean_expressions" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml new file mode 100644 index 00000000..e7e80969 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml @@ -0,0 +1,5 @@ +"dataset_name": "causal_judgement" +"description": "Answer questions about causal attribution.\n\n" +"doc_to_text": "Q: How would a typical person answer each of the following questions about causation?\nFrank T., had an ongoing dispute with his neighbor over a stretch of land and one day decided to shoot his neighbor in the body. Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild. Nonetheless, the bullet bounced off a large boulder several feet away and hit the neighbor's body, causing significant injury. Did Frank T. intentionally shoot his neighbor in the body?\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nHere in this question, we are told that \"Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild.\" A typical person would assume that this passage suggests that Frank T. had no intention of shooting and injuring someone and that the bullet accidentally hit the neighbor's body; therefore, we conclude that Frank T. did not intentionally hit his neighbor. So the answer is No.\n\nQ: How would a typical person answer each of the following questions about causation?\nSuzy and Billy are working on a project that is very important for our nation's security. The boss tells them both: \"Be sure that you are here at exactly 9 am. It is absolutely essential that you arrive at that time.\" Both Billy and Suzy arrive at 9 am. As it happens, there was a motion detector installed in the room where they arrived. The motion detector was set up to be triggered if at least one person appeared in the room at the same time. So the motion detector went off. Did Billy cause the motion detector to go off?\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nHere in this question, we are told that the boss ordered them both to arrive at the meeting room at the same time and that the motion detector was set up to be triggered if at least one person appeared in the room at the same time.\" A typical person would assume that the person probably meant to say the detector was set up to be triggered if \"both persons\" appeared in the room at the same time, not at least one person, since otherwise the phrase \"at the same time\" would not make much sense in that sentence. Because the motion detector went off, a typical person would therefore come to the conclusion that both Suzy and Billy triggered the motion detector to go off; hence, Billy did indeed cause the motion detector to go off. So the answer is Yes.
\n\nQ: How would a typical person answer each of the following questions about causation?\nGeorge and his sister Lena reunite at their parents' house for Thanksgiving. Whereas George just got into medical school, Lena is unhappy in her marriage and recently lost her job. Over the course of the day, George and Lena get into a number of heated arguments. Later in the afternoon they play a game of darts. They split the first two games, and the third game is close until the end. Who will win comes down to George's last shot. If he hits a high point region, he wins; if he hits a low point region, Lena wins. George thinks of the difficult time Lena is having, and he really wants to let her win. He aims the dart at the low point region. He sets up his shot and the dart lands in the low point region. After his shot, Lena wins the game and is very happy. Did George hit the low point region intentionally?\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nHere in this question, we are told that \"He aims the dart at the low point region.\" A typical person might therefore think George did intentionally hit the low point region, because he wanted to lift up the spirit of his sister Lena. So the answer is Yes.\n\nQ: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_causal_judgement" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml new file mode 100644 index 00000000..d5dc2117 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "date_understanding" +"description": "Infer the date from context.\n\n" +"doc_to_text": "Q: Today is Christmas Eve of 1937. What is the date 10 days ago in MM/DD/YYYY?\nOptions:\n(A) 12/14/2026\n(B) 12/14/1950\n(C) 12/14/2007\n(D) 12/14/1937\n(E) 07/14/1938\n(F) 12/14/1988\nA: Let's think step by step.\nIf today is Christmas Eve of 1937, then today's date is December 24, 1937. 10 days before today is December 14, 1937, that is 12/14/1937. So the answer is (D).\n\nQ: Tomorrow is 11/12/2019. What is the date one year ago from today in MM/DD/YYYY?\nOptions:\n(A) 09/04/2018\n(B) 11/11/2018\n(C) 08/25/2018\n(D) 11/02/2018\n(E) 11/04/2018\nA: Let's think step by step.\nIf tomorrow is 11/12/2019, then today is 11/11/2019. The date one year ago from today is 11/11/2018. So the answer is (B).\n\nQ: Jane and John married on Jan 2, 1958. It is their 5-year anniversary today. What is the date tomorrow in MM/DD/YYYY?\nOptions:\n(A) 01/11/1961\n(B) 01/03/1963\n(C) 01/18/1961\n(D) 10/14/1960\n(E) 01/03/1982\n(F) 12/03/1960\nA: Let's think step by step.\nIf Jane and John married on Jan 2, 1958, then and if it is their 5-year anniversary today, then today's date is Jan 2, 1963. The date tomorrow is Jan 3, 1963, that is 01/03/1963.
So the answer is (B).\n\nQ: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_date_understanding" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml new file mode 100644 index 00000000..2d08474c --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml @@ -0,0 +1,5 @@ +"dataset_name": "disambiguation_qa" +"description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n" +"doc_to_text": "Q: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The chief told the counselor that they took the day off.\nOptions:\n(A) The chief took the day off\n(B) The counselor took the day off\n(C) Ambiguous\nA: Let's think step by step.\nHere we need to determine who the pronoun \"they\" might be referring to. There are two possible referents for \"they\", namely the chief and the counselor. The verb \"told\" might be able to help us determine which one is more likely (if either). Let X be the chief and Y the counselor. The sentence is then of the form \"X told Y that (X or Y) did something.\"\nLet's consider Y first: \"X told Y that Y did something.\" This case does not make much sense, as Y would already have the information that Y did something, because it is information about themself.\nNow, consider X: \"X told Y that X did something.\" This makes sense, because X would be sharing some information about themself that Y might not have known before.\nBecause in this context, X is the chief and Y is the counselor, the answer should be the chief. So the answer is (A).\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The manager sent a message to the secretary, but he didn't reply yet.\nOptions:\n(A) The secretary didn't reply yet\n(B) The manager didn't reply yet\n(C) Ambiguous\nA: Let's think step by step.\nHere we need to determine who the pronoun \"he\" might be referring to. There are two possible referents for \"he\", namely the manager and the secretary. The verbs \"sent\" and \"reply\" might be able to help us determine which one is more likely (if either). Let X be the manager and Y the secretary. The sentence is then of the form \"X sent a message to Y, but (X or Y) didn't reply yet.\"\nLet's consider Y first: \"X sent a message to Y, but Y didn't reply yet.\" This case makes sense, because of the implicit causality of the sentence. Y was the receiver of the message, but Y didn't get back to X yet.\nNow, consider X: \"X sent a message to Y, but X didn't reply yet.\" This case doesn't make sense, because X was the initial sender of the message, so it is now Y's turn to write back to X.\nBecause in this context, X is the manager and Y is the secretary, the answer should be the secretary. So the answer is (A).\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: Bailey will plan to meet the director at his office\nOptions:\n(A) It will be Bailey's office\n(B) It will be the director's office\n(C) Ambiguous\nA: Let's think step by step.\nHere we need to determine who the pronoun \"his\" might be referring to. There are two possible referents for \"his\", namely Bailey's and the director's.
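Each doc_to_text above is a Jinja2 template over a raw dataset example: the few-shot chain-of-thought exemplars are baked into the string itself, and only {{input}} is substituted per document. A sketch of that substitution step (jinja2 assumed; the harness performs this internally when building prompts):

    from jinja2 import Template

    # Exemplars elided; only the tail of a doc_to_text string is shown.
    doc_to_text = "Q: {{input}}\nA: Let's think step by step.\n"
    doc = {"input": "Options:\n(A) valid\n(B) invalid"}

    prompt = Template(doc_to_text).render(**doc)
    assert prompt.startswith("Q: Options:")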
The verb phrase \"plan to meet\" might be able to help us determine which one is more likely (if either). Let X be Bailey and Y the director. The sentence is then of the form \"X will plan to meet Y at (X or Y)'s office.\"\nLet's consider Y first: \"X will plan to meet Y at Y's office.\" This case makes sense, because X might want to meet up with Y at Y's office.\nNow, consider X: \"X will plan to meet Y at X's office.\" This case also makes sense, because X might want to meet up with Y at X's own office.\nBecause both X and Y are possible at the same time, we conclude that the antecedent of the pronoun is ambiguous. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_disambiguation_qa" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml new file mode 100644 index 00000000..f15bda16 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml @@ -0,0 +1,5 @@ +"dataset_name": "dyck_languages" +"description": "Correctly close a Dyck-n word.\n\n" +"doc_to_text": " Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: [ { [\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: [ ; stack: [\n2: { ; stack: [ {\n3: [ ; stack: [ { [\nNow, we have reached the end. The final stack is \"[ { [\".\nWe will need to pop out \"[\", \"{\", \"[\" one by one in that order.\nSo, we need \"]\", \"}\", \"]\". So the answer is ] } ]. Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: < ; stack: <\n2: > ; stack: empty\n3: ( ; stack: (\n4: ( ; stack: ( (\n5: [ ; stack: ( ( [\n6: [ ; stack: ( ( [ [\n7: ( ; stack: ( ( [ [ (\n8: { ; stack: ( ( [ [ ( {\n9: } ; stack: ( ( [ [ (\n10: ) ; stack: ( ( [ [\n11: [ ; stack: ( ( [ [ [\n12: < ; stack: ( ( [ [ [ <\n13: > ; stack: ( ( [ [ [\n14: ] ; stack: ( ( [ [\n15: ] ; stack: ( ( [\nNow, we have reached the end. The final stack is \"( ( [\".\nWe will need to pop out \"[\", \"(\", \"(\" one by one in that order.\nSo, we need \"]\", \")\", \")\". So the answer is ] ) ). Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < > ] > }\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: < ; stack: <\n2: [ ; stack: < [\n3: < ; stack: < [ <\n4: [ ; stack: < [ < [\n5: { ; stack: < [ < [ {\n6: < ; stack: < [ < [ { <\n7: [ ; stack: < [ < [ { < [\n8: ] ; stack: < [ < [ { <\n9: < ; stack: < [ < [ { < <\n10: { ; stack: < [ < [ { < < {\n11: } ; stack: < [ < [ { < <\n12: > ; stack: < [ < [ { <\n13: > ; stack: < [ < [ {\n14: } ; stack: < [ < [\n15: ] ; stack: < [ <\n16: > ; stack: < [\n17: { ; stack: < [ {\n18: { ; stack: < [ { {\n19: ( ; stack: < [ { { (\n20: ) ; stack: < [ { {\n21: } ; stack: < [ {\n22: { ; stack: < [ { {\n23: < ; stack: < [ { { <\n24: [ ; stack: < [ { { < [\n25: < ; stack: < [ { { < [ <\n26: > ; stack: < [ { { < [\n27: ] ; stack: < [ { { <\n28: > ; stack: < [ { {\n29: } ; stack: < [ {\nNow, we have reached the end. 
The final stack is \"< [ {\".\nWe will need to pop out \"{\", \"[\", \"<\" one by one in that order.\nSo, we need \"}\", \"]\", \">\". So the answer is } ] >.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_dyck_languages" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml new file mode 100644 index 00000000..7e28e84e --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "formal_fallacies" +"description": "Distinguish deductively valid arguments from formal fallacies.\n\n" +"doc_to_text": " \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: To begin with, Lesley is a close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy. It follows that Lesley is a great-grandfather of Leroy.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Lesley is a close friend of Fernando: Lesley = friend(Fernando).\n(2) Being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy: If X = friend(Fernando) OR SCHOOLMATE(Lowell), then X = great-grandfather(Leroy).\nHypothesis: Does it follow that Lesley is a great-grandfather of Leroy: Lesley = great-grandfather(Leroy)?\nLet’s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (1), we have Lesley = friend(Fernando). By (2), we have if Lesley = friend(Fernando), then Lesley = great-grandfather(Leroy).\nSo, it is true that Lesley is a great-grandfather of Leroy. So the answer is valid. \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: Whoever is not a great-grandfather of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde. We may conclude: Everyone who is an ancestor of Dana is a stepbrother of Brian, too.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Whoever is not a great-grandfather of Clyde is a stepbrother of Brian: If X = NOT (great-grandfather(Clyde)), then X = stepbrother(Brian).\n(2): Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde: If X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\nHypothesis: Does it follow that everyone who is an ancestor of Dana is a stepbrother of Brian, too: If X = ancestor(Dana), then X = stepbrother(Brian)?\nLet’s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (2), we have if X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\nFurthermore, by (1), we have if X = NOT (great-grandfather(Clyde)), then X = stepbrother(Brian).\nBy the transitive relation rule in first-order logic, we then have: if X = ancestor(Dana), then X = stepbrother(Brian).\nSo, it is true that everyone who is an ancestor of Dana is a stepbrother of Brian. So the answer is valid. \"It is not always easy to grasp who is consuming which products. 
The following argument pertains to this question: Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and, in the same time, a loyal buyer of Caress soap. It follows that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both: If X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin) OR X = loyal-buyer(Caress).\n(2): No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and a loyal buyer of Caress soap at the same time. If X = regular-consumer(Lush), then X = NOT (rare-consumer(Nioxin) AND loyal-buyer(Caress)).\nHypothesis: Does it follow that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap: If X = infrequent-user(Paul Mitchell), then X = NOT (regular-consumer(Lush))?\nLet’s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (1), we have if X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin) OR X = loyal-buyer(Caress). We need to consider both cases separately:\nThe case X = rare-consumer(Nioxin) does not appear in (2).\nThe case X = loyal-buyer(Caress) does not appear in (2), either.\nSo, from (1) and (2), we cannot necessarily deduce the Hypothesis. So the answer is invalid.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_formal_fallacies" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml new file mode 100644 index 00000000..07844a53 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml @@ -0,0 +1,5 @@ +"dataset_name": "geometric_shapes" +"description": "Name geometric shapes from their SVG paths.\n\n" +"doc_to_text": " This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 9 separate commands.\n(1) M 31.00,73.00: Move the current point to 31.00,73.00.\n(2) L 32.00,59.00: Create a line from 31.00,73.00 to 32.00,59.00.\n(3) L 44.00,50.00: Create a line from 32.00,59.00 to 44.00,50.00.\n(4) L 49.00,41.00: Create a line from 44.00,50.00 to 49.00,41.00.\n(5) L 64.00,37.00: Create a line from 49.00,41.00 to 64.00,37.00.\n(6) L 71.00,55.00: Create a line from 64.00,37.00 to 71.00,55.00.\n(7) L 64.00,76.00: Create a line from 71.00,55.00 to 64.00,76.00.\n(8) L 52.00,61.00: Create a line from 64.00,76.00 to 52.00,61.00.\n(9) L 31.00,73.00: Create a line from 52.00,61.00 to 31.00,73.00.\nThis SVG path starts at point 31.00,73.00, creates eight consecutive and touching lines, and then returns back its starting point, thereby creating an eight-sided shape. It does not have any curves or arches. 
\"octagon\" is the only eight-sided object on the list. So the answer is (F). This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 6 separate commands.\n(1) M 14.19,26.04: Move the current point to 14.19,26.04.\n(2) L 51.43,39.21: Create a line from 14.19,26.04 to 51.43,39.21.\n(3) L 58.44,36.69: Create a line from 51.43,39.21 to 58.44,36.69.\n(4) L 56.63,30.17: Create a line from 58.44,36.69 to 56.63,30.17.\n(5) L 48.53,26.66: Create a line from 56.63,30.17 to 48.53,26.66.\n(6) L 14.19,26.04: Create a line from 48.53,26.66 to 14.19,26.04.\nThis SVG path starts at point 14.19,26.04, creates five consecutive and touching lines, and then returns back its starting point, thereby creating a five-sided shape. It does not have any curves or arches. \"pentagon\" is the only five-sided polygon on the list. So the answer is (G). This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 5 separate commands.\n(1) M 41.00,43.00: Move the current point to 41.00,43.00.\n(2) L 37.00,34.00: Create a line from 41.00,43.00 to 37.00,34.00.\n(3) L 41.00,33.00: Create a line from 37.00,34.00 to 41.00,33.00.\n(4) L 45.00,34.00: Create a line from 41.00,33.00 to 45.00,34.00.\n(5) L 41.00,43.00: Create a line from 45.00,34.00 to 41.00,43.00.\nThis SVG path starts at point 41.00,43.00, creates four consecutive and touching lines, and then returns back its starting point, thereby creating a four-sided shape. \"kite\" and \"rectangle\" are the only two four-sided polygons on the list. So, we need to determine which one is the correct answer.\nA kite has two pairs of equal-length adjacent sides, whereas a rectangle has two pairs of equal-length alternate (opposite) sides. Now, let's check whether the two adjacent sides of this shape are equal.\nLength of side A: |A| = sqrt((41.00-37.00)^2 + (43.00-34.00)^2) = sqrt((4)^2 + (9)^2) = sqrt(16 + 81) = sqrt(97).\nLength of side B: |B| = sqrt((37.00-41.00)^2 + (34.00-33.00)^2)) = sqrt((4)^2 + (1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side C: |C| = sqrt((41.00-45.00)^2 + (33.00-34.00)^2)) = sqrt((-4)^2 + (-1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side D: |D| = sqrt((45.00-41.00)^2 + (34.00-43.00)^2)) = sqrt((4)^2 + (-9)^2) = sqrt(16 + 81) = sqrt(97).\nNote that |A| = |D| and |B| = |C|. Furthermore, A and D are adjacent and B and C are adjacent. Thus, this polygon has two pairs of equal-length adjacent sides and is \"kite\". 
So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_geometric_shapes" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml new file mode 100644 index 00000000..2dc997cc --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml @@ -0,0 +1,5 @@ +"dataset_name": "hyperbaton" +"description": "Order adjectives correctly in English sentences.\n\n" +"doc_to_text": " Which sentence has the correct adjective order:\nOptions:\n(A) rubber terrible ship\n(B) terrible rubber ship\nA: Let's think step by step.\nWhen there is more than one adjective before a noun, the adjectives need to respect the following order before a noun: \"[1. opinion] [2. size] [3. age] [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun\".\nOption (A): \"rubber terrible ship\". (1) rubber\" falls into the material category. (2) \"terrible\" falls into the opinion category. Option (A) has the following adjective order: [7. material] [1. opinion] (or, in numeric terms, 7 1). Because 7 < 1 is not correct, (A) does not have the correct ordering.\nOption (B): \"terrible rubber ship\". Option (B) has the following adjective order: [1. opinion] [7. material] (or, in numeric terms, 1 7). Because 1 < 7 is correct, (B) has the correct ordering. So the answer is (B). Which sentence has the correct adjective order:\nOptions:\n(A) repulsive small Brazilian exercise ship\n(B) Brazilian repulsive exercise small ship\nA: Let's think step by step.\nWhen there is more than one adjective before a noun, the adjectives need to respect the following order before a noun: \"[1. opinion] [2. size] [3. age] [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun\".\nOption (A): \"repulsive small Brazilian exercise ship\". (1) \"repulsive\" falls into the opinion category. (2) \"small\" falls into the size category. (3) \"Brazilian\" falls into the origin category. (4) \"exercise\" falls into the purpose category. Option (A) has the following adjective order: [1. opinion] [2. size] [6. origin] [8. purpose] (or, in numeric terms, 1 2 6 8). Because 1 < 2 < 6 < 8 is correct, (A) has the correct ordering.\nOption (B): \"Brazilian repulsive exercise small ship\". Option (B) has the following adjective order: [6. origin] [1. opinion] [8. purpose] [2. size] (or, in numeric terms, 6 1 8 2). Because 6 < 1 < 8 < 2 is not correct, (B) does not have the correct ordering. So the answer is (A). Which sentence has the correct adjective order:\nOptions:\n(A) blue gold wonderful square shoe\n(B) wonderful square blue gold shoe\nA: Let's think step by step.\nWhen there is more than one adjective before a noun, the adjectives need to respect the following order before a noun: \"[1. opinion] [2. size] [3. age] [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun\".\nOption (A): \"blue gold wonderful square shoe\". (1) \"blue\" falls into the color category. (2) \"gold\" falls into the material category. (3) \"wonderful\" falls into the opinion category. (4) \"square\" falls into the shape category. The adjective order that Option (A) has is [5. color] [7. material] [1. opinion] [4. shape] (or, in numeric terms, 5 7 1 4). Because 5 < 7 < 1 < 4 is not correct, (A) does not have the correct ordering.\nOption (B): \"wonderful square blue gold shoe\". Option (B) has the following adjective order: [1. opinion] [4. shape] [5. color] [7. material] (or, in numeric terms, 1 4 5 7 ). 
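The side-counting argument in these geometric_shapes exemplars reduces to parsing the M/L commands and checking that the path closes on its start point. A rough Python sketch under those assumptions (the regex covers only the absolute "M x,y" / "L x,y" syntax used in the exemplars):

```python
import re

def count_sides(svg_path: str) -> int:
    """Sides of a closed shape drawn with one M command and a chain of L commands."""
    pts = [tuple(map(float, m.groups()))
           for m in re.finditer(r"[ML]\s*([-\d.]+)[ ,]([-\d.]+)", svg_path)]
    assert pts[0] == pts[-1], "expected the path to return to its starting point"
    return len(pts) - 1

octagon = ("M 31.00,73.00 L 32.00,59.00 L 44.00,50.00 L 49.00,41.00 L 64.00,37.00 "
           "L 71.00,55.00 L 64.00,76.00 L 52.00,61.00 L 31.00,73.00")
print(count_sides(octagon))  # 8 -> option (F) in the first exemplar
```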
Because 1 < 4 < 5 < 7 is correct, (B) has the correct ordering. So the answer is (B).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_hyperbaton" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml new file mode 100644 index 00000000..b54bac3b --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_five_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": " The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. 
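The category-ranking rule these hyperbaton exemplars apply can be checked mechanically once each adjective is mapped to its category. A small Python sketch; the lexicon below is a toy assumption covering only the adjectives that appear above:

```python
ORDER = ["opinion", "size", "age", "shape", "color", "origin", "material", "purpose"]
CATEGORY = {"terrible": "opinion", "wonderful": "opinion", "repulsive": "opinion",
            "small": "size", "square": "shape", "blue": "color",
            "Brazilian": "origin", "rubber": "material", "gold": "material",
            "exercise": "purpose"}

def well_ordered(adjectives):
    ranks = [ORDER.index(CATEGORY[a]) for a in adjectives]
    return ranks == sorted(ranks)

print(well_ordered(["terrible", "rubber"]))                   # True  -> option (B)
print(well_ordered(["blue", "gold", "wonderful", "square"]))  # False -> rules out (A)
```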
So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_logical_deduction_five_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml new file mode 100644 index 00000000..f4799ae6 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_seven_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": " The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. 
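These logical_deduction exemplars (shared across the three-, five-, and seven-object variants in this patch) follow a constraint-satisfaction pattern that can be brute-forced over permutations. A minimal Python sketch of the golf example:

```python
from itertools import permutations

golfers = ("Amy", "Eli", "Eve")
# "Eve finished above Amy" and "Eli finished below Amy"; index 0 is first place.
valid = [p for p in permutations(golfers)
         if p.index("Eve") < p.index("Amy") < p.index("Eli")]
print(valid)         # [('Eve', 'Amy', 'Eli')]
print(valid[0][-1])  # Eli finished last -> option (B)
```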
So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml new file mode 100644 index 00000000..cbc0dbd1 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_three_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": " The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. 
So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml new file mode 100644 index 00000000..573b3222 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml @@ -0,0 +1,5 @@ +"dataset_name": "movie_recommendation" +"description": "Recommend movies similar to the given list of movies.\n\n" +"doc_to_text": " Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big Lebowski:\nOptions:\n(A) Tetsuo\n(B) the Ironman\n(C) The Princess Bride\n(D) The Barkley Marathons The Race That Eats Its Young\n(E) Bug\nA: Let's think step by step.\n- Star Wars Episode IV - A New Hope (action, adventure, fantasy; 1977)\n- Indiana Jones and the Last Crusade (action, adventure; 1989)\n- Star Wars Episode V - The Empire Strikes Back (action, adventure, fantasy; 1980)\n- The Big Lebowski (action, drama, comedy; 1998)\nThese are all famous classic American movies produced before 2000. Amongst all the options, the only movie similar to these ones seems to be The Princess Bride (1987). So the answer is (C). Find a movie similar to Twister, The Silence of the Lambs, Independence Day, Braveheart:\nOptions:\n(A) They Shoot Horses\n(B) Don't They\n(C) Forrest Gump\n(D) The Salton Sea\n(E) Extreme Days\nA: Let's think step by step.\n- Twister (action, adventure, thriller; 1996)\n- The Silence of the Lambs (crime, drama, thriller; 1991)\n- Independence Day (action, science-fiction, drama; 1996)\n- Braveheart (biography, drama, epic; 1995)\nThese are all famous Hollywood movies produced around the 1990s. Amongst all the options, the only movie similar to these ones seems to be Forrest Gump (comedy, drama, romance; 1994). So the answer is (C). Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest Gump:\nOptions:\n(A) Phenomena\n(B) Lilting\n(C) Catwoman\n(D) Edge of Tomorrow\nA: Let's think step by step.\n- Minority Report (action, crime, mystery; 2002)\n- Total Recall (action, adventure, science-fiction; 2012)\n- Inside Out (animation, family, comedy; 2015)\n- Forrest Gump (comedy, drama, romance; 1994)\nThese are all famous movies produced in the past few decades.Amongst all the options, the only movie similar to these ones seems to be Edge of Tomorrow (action, adventure, crime, mystery; 2014), as it is also a science-fiction movie and features Tom Cruise. So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml new file mode 100644 index 00000000..21f34e7a --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml @@ -0,0 +1,5 @@ +"dataset_name": "multistep_arithmetic_two" +"description": "Solve multi-step arithmetic problems.\n\n" +"doc_to_text": " ((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5)) =\nA: Let's think step by step.\nLet’s recall that the order of operations in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication and division (from left to right), (4) addition and multiplication (from left to right). 
So, remember to always compute the expressions inside parentheses or brackets first.\nThis equation can be written as \"A * B\", where A = (-5 + 9 * -4 - 0) and B = (4 + -7 + 0 * -5).\nLet's calculate A = (-5 + 9 * -4 - 0) = (-5 + (9 * -4) - 0) = (-5 + (-36) - 0) = (-5 + -36 - 0) = -5 - 36 = -41.\nLet's calculate B = (4 + -7 + 0 * -5) = (4 + -7 + (0 * -5)) = (4 + -7 + 0) = (4 + -7) = (4 - 7) = -3.\nThen, the final equation is A * B = -41 * -3 = (-41) * (-3) = 123. So the answer is 123. ((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4)) =\nA: Let's think step by step.\nLet’s recall that the order of operations in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication and division (from left to right), (4) addition and multiplication (from left to right). So, remember to always compute the expressions inside parentheses or brackets first.\nThis equation can be written as \"A + B\", where A = (-9 * 7 * 7 * -9) and B = (4 * -9 - 8 - -4).\nLet's calculate A = (-9 * 7 * 7 * -9) = ((-9 * 7) * (7 * -9)) = ((-63) * (-63)) = 3969.\nLet's calculate B = (4 * -9 - 8 - (-4)) = ((4 * -9) - 8 - (-4)) = ((-36) - 8 - (-4)) = ((-36 - 8) - (-4)) = (-44 - (-4)) = -40.\nThen, the final equation is A + B = 3969 + -40 = 3969 - 40 = 3929. So the answer is 3929. ((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9)) =\nA: Let's think step by step.\nLet’s recall that the order of operations in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication and division (from left to right), (4) addition and multiplication (from left to right). So, remember to always compute the expressions inside parentheses or brackets first.\nThis equation can be written as \"A - B\", where A = (-3 + 5 * 8 * -4) and B = (9 - 8 * -7 + -9).\nLet's calculate A = (-3 + 5 * 8 * -4) = (-3 + (5 * 8) * -4) = (-3 + (40) * -4) = (-3 + (40 * -4)) = (-3 + -160) = -163.\nLet's calculate B = (9 - 8 * -7 + -9) = (9 - (8 * -7) + -9) = (9 - (-56) + -9) = ((9 - (-56)) + -9) = ((65) + -9)= (65 - 9) = 56.\nThen, the final equation is A - B = -163 - 56 = -219. So the answer is -219.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml new file mode 100644 index 00000000..be3cd234 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml @@ -0,0 +1,5 @@ +"dataset_name": "navigate" +"description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" +"doc_to_text": " If you follow these instructions, do you return to the starting point? Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps. Take 8 steps.\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nWe start at the origin (0, 0), facing the positive y-axis.\n(1) Turn left: (0, 0), facing the negative x-axis.\n(2) Turn around: (0, 0), facing the positive x-axis.\n(3) Turn left: (0, 0), facing the positive y-axis.\n(4) Take 7 steps: (0, 7), facing the positive y-axis.\n(5) Take 2 steps: (0, 9), facing the positive y-axis.\n(6) Take 4 steps: (0, 13), facing the positive y-axis.\n(7) Take 8 steps: (0, 21), facing the positive y-axis.\nSince (0, 21) is not (0, 0), we are not where we started. So the answer is No. If you follow these instructions, do you return to the starting point? Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps.
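Python happens to follow the same precedence rules the multistep_arithmetic_two exemplars recite, so the three worked expressions can be checked directly. A sanity-check sketch (eval is acceptable here only because the strings are hand-written arithmetic literals):

```python
checks = {
    "((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5))": 123,
    "((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4))": 3929,
    "((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9))": -219,
}
for expr, expected in checks.items():
    assert eval(expr) == expected, expr  # trusted, hand-written literals only
print("all three worked examples verified")
```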
Take 1 step.\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nWe start at the origin (0, 0), facing the positive y-axis.\n(1) Turn around: (0, 0), facing the negative y-axis.\n(2) Take 1 step: (0, -1), facing the negative y-axis.\n(3) Take 6 steps: (0, -7), facing the negative y-axis.\n(4) Turn around: (0, -7), facing the positive y-axis.\n(5) Take 6 steps: (0, -1), facing the positive y-axis.\n(6) Take 9 steps: (0, 8), facing the positive y-axis.\n(7) Take 1 step: (0, 9), facing the positive y-axis.\nSince (0, 9) is not (0, 0), we are not where we started. So the answer is No. If you follow these instructions, do you return to the starting point? Always face forward. Take 2 steps right. Take 9 steps left. Take 7 steps right.\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nWe start at the origin (0, 0), facing the positive y-axis.\n(1) Always face forward: (0, 0), facing the positive y-axis.\n(2) Take 2 steps right: (0, 2), facing the positive y-axis.\n(3) Take 9 steps left: (0, -7), facing the positive y-axis.\n(4) Take 7 steps right: (0, 0), facing the positive y-axis.\nSince (0, 0) is (0, 0), we are indeed where we started. So the answer is Yes.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml new file mode 100644 index 00000000..767d414f --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "object_counting" +"description": "Questions that involve enumerating objects and asking the model to count them.\n\n" +"doc_to_text": " I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana, a flute, an orange, and a violin. How many fruits do I have?\nA: Let's think step by step.\nWe first identify the fruits on the list and include their quantity in parentheses:\n- blackberry (1)\n- nectarine (1)\n- plum (1)\n- strawberry (1)\n- banana (1)\n- orange (1)\nNow, let's add the numbers in parentheses: 1 + 1 + 1 + 1 + 1 + 1 = 6. So the answer is 6. I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape, a nectarine, and three plums. How many fruits do I have?\nA: Let's think step by step.\nWe first identify the fruits on the list and include their quantity in parentheses:\n- orange (1)\n- raspberry (1)\n- peaches (2)\n- blackberry (1)\n- apple (1)\n- grape (1)\n- nectarine (1)\n- plums (3)\nNow, let's add the numbers in parentheses: 1 + 1 + 2 + 1 + 1 + 1 + 1 + 3 = 11. So the answer is 11. I have a lettuce head, a head of broccoli, an onion, a stalk of celery, two carrots, a garlic, and a yam. How many vegetables do I have?\nA: Let's think step by step.\nWe first identify the vegetables on the list and include their quantity in parentheses:\n- lettuce (1)\n- broccoli (1)\n- onion (1)\n- celery (1)\n- carrots (2)\n- garlic (1)\n- yam (1)\nNow, let's add the numbers in parentheses: 1 + 1 + 1 + 1 + 2 + 1 + 1 = 8.
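The position-and-heading bookkeeping in these navigate exemplars simulates cleanly. A Python sketch covering the turn-and-step phrasing of the first two examples (instruction parsing is deliberately minimal and is an assumption, not harness code):

```python
def returns_to_start(instructions: list[str]) -> bool:
    x = y = 0
    dx, dy = 0, 1                      # start facing the positive y-axis
    for ins in instructions:
        if ins == "Turn left":
            dx, dy = -dy, dx
        elif ins == "Turn right":
            dx, dy = dy, -dx
        elif ins == "Turn around":
            dx, dy = -dx, -dy
        elif ins.startswith("Take"):
            n = int(ins.split()[1])
            x, y = x + n * dx, y + n * dy
    return (x, y) == (0, 0)

steps = ["Turn left", "Turn around", "Turn left",
         "Take 7 steps", "Take 2 steps", "Take 4 steps", "Take 8 steps"]
print(returns_to_start(steps))  # False, matching the first worked example
```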
So the answer is 8.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml new file mode 100644 index 00000000..439c5f45 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml @@ -0,0 +1,5 @@ +"dataset_name": "penguins_in_a_table" +"description": "Answer questions about a table of penguins and their attributes.\n\n" +"doc_to_text": " Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add a penguin to the table:\nJames, 12, 90, 12\nHow many penguins are less than 8 years old?\nOptions:\n(A) 1\n(B) 2\n(C) 3\n(D) 4\n(E) 5\nA: Let's think step by step.\nThis question focuses on age. We know the following: Louis is 7 years old, Bernard is 5 years old, Vincent is 9 years old, and Gwen is 8 years old.\nNow, we add James to this table: James is 12 years old.\nThe penguins that are less than 8 years old are Louis and Bernard.\nThere are 2 penguins less than 8 years old. So the answer is (B). Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is the youngest penguin?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: Let's think step by step.\nThis question focuses on age. We know the following: Louis is 7 years old, Bernard is 5 years old, Vincent is 9 years old, and Gwen is 8 years old.\nAccording to the table, Bernard (5) is the youngest amongst them.\nThe youngest penguin is Bernard. So the answer is (B). Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. What is the name of the second penguin sorted by alphabetic order?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: Let's think step by step.\nThis question focuses on the name. We know the following: The names of the penguin in the table are Louis, Bernard, Vincent, and Gwen.\nWhen we sort their names alphabetically, we get Bernard, Gwen, Louis, Vincent.\nThe name of the second penguin sorted by alphabetical order is Gwen.\nThe name of the second penguin sorted by alphabetic order is Gwen. 
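The enumerate-and-sum pattern in these object_counting exemplars is a category filter plus a count. A minimal Python sketch; the fruit lexicon is a toy assumption covering only the first exemplar:

```python
FRUITS = {"blackberry", "nectarine", "plum", "strawberry", "banana", "orange"}
items = ["blackberry", "clarinet", "nectarine", "plum", "strawberry",
         "banana", "flute", "orange", "violin"]
print(sum(1 for it in items if it in FRUITS))  # 6
```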
So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml new file mode 100644 index 00000000..a93951da --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "reasoning_about_colored_objects" +"description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" +"doc_to_text": " On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: Let's think step by step.\nAccording to this question, the color of the stress ball is blue. So the answer is (E). On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: Let's think step by step.\nAccording to this question, the objects are arranged in a row, from left to right, as follows: (1) a purple paperclip, (2) a pink stress ball, (3) a brown keychain, (4) a green scrunchiephone charger, (5) a mauve fidget spinner, (6) a burgundy pen.\nThe stress ball is the second object on the list, namely (2). The object that is to the right of the stress ball corresponds to (3), which is a brown keychain.\nThe color of the keychain is brown. So the answer is (F). On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. How many non-orange items do you see to the left of the teal item?\nOptions:\n(A) zero\n(B) one\n(C) two\n(D) three\n(E) four\n(F) five\n(G) six\nA: Let's think step by step.\nAccording to this question, the objects are arranged in a row, from left to right, as follows: (1) a teal plate, (2) a burgundy keychain, (3) a yellow scrunchiephone charger, (4) an orange mug, (5) a pink notebook, (6) a grey cup.\nThe teal plate is the first item, namely (1). There is no item to the left of the teal item.\nThe number of non-orange items to the left of the teal item is zero. 
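The penguins_in_a_table exemplars amount to row filtering and sorting over a tiny table. A Python sketch of the first two questions (illustrative only):

```python
rows = [("Louis", 7, 50, 11), ("Bernard", 5, 80, 13),
        ("Vincent", 9, 60, 11), ("Gwen", 8, 70, 15)]

# First exemplar: add James, then count penguins under 8 years old.
with_james = rows + [("James", 12, 90, 12)]
print(sum(1 for name, age, height, weight in with_james if age < 8))  # 2 -> (B)

# Second exemplar: youngest penguin in the base table.
print(min(rows, key=lambda r: r[1])[0])  # Bernard -> (B)
```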
So the answer is (A).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml new file mode 100644 index 00000000..d6a3485b --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml @@ -0,0 +1,5 @@ +"dataset_name": "ruin_names" +"description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" +"doc_to_text": " Which of the following is a humorous edit of this artist or movie name: 'whitesnake'?\nOptions:\n(A) whitesnape\n(B) whitesnapke\n(C) whitesnuake\n(D) mwhitesnake\nA: Let's think step by step.\nThe original name is \"whitesnake\". This is the name of an old English hard rock band. It is a compound word, formed by the words \"white\" and \"snake\".\n(A) \"whitesnape\": It is formed by the combination of \"white\" and \"snake\"; therefore, \"snake\" has been changed to \"snape\". Snape makes a reference to the fictional character Severus Snape in the Harry Potter series, so (A) is indeed a meaningful and funny edit.\n(B) \"whitesnapke\": It is formed by the combination of \"white\" and \"snapke\", but \"snapke\" is not an actual word; therefore, \"whitesnapke\" is not humorous.\n(C) \"whitesnuake\": It is formed by the combination of \"white\" and \"snuake\", but \"snuake\" is not an actual word; therefore, \"whitesnuake\" is not humorous.\n(D) \"mwhitesnake\": It is formed by the combination of \"m\", \"white\", and \"snake\", but the prefix \"-m \"seems arbitrary; therefore, \"mwhitesnake\" is not meaningful or humorous.\nAbove the above, the only humorous edit is (A). So the answer is (A). Which of the following is a humorous edit of this artist or movie name: 'one of our dinosaurs is missing'?\nOptions:\n(A) ofne of our dinosaurs is missing\n(B) one af our dinosaurs is missing\n(C) one of our dinosaurs is pissing\n(D) one of our dinosaur is missing\nA: Let's think step by step.\nThe original name is \"one of our dinosaurs is missing\". This is the name of an old British movie.\n(A) \"ofne of our dinosaurs is missing\": Here \"one of\" is changed to \"ofne\", but the word \"ofne\" is not an actual word.\n(B) \"one af our dinosaurs is missing\": Here the word \"of\" is changed to \"af\", but the word \"af\" is not an actual word.\n(C) \"one of our dinosaurs is pissing\": Here the word \"missing\" is changed to \"pissing\", and \"one of our dinosaurs is pissing\" is indeed a very whimsical and mischievous edit. This change truly ruins the original title of the movie.\n(D) \"one of our dinosaur is missing\": Here the word \"dinosaurs\" is changed to \"dinosaur\", but \"dinosaur\" is singular but should be plural in the title; this change therefore feels arbitrary and not humorous.\nAbove the above, the only humorous edit is (C).\nAbove the above, the only humorous edit is (C). So the answer is (C). Which of the following is a humorous edit of this artist or movie name: 'counting crows'?\nOptions:\n(A) countingy crows\n(B) counting cows\n(C) courting crows\n(D) coutnting crows\nA: Let's think step by step.\nThe original name is \"counting crows\". This is the name of an American rock band. 
Historically, the band name comes from the British nursery rhyme \"One for Sorrow\", which is about counting of magpies.\n(A) \"countingy crows\": Here the word \"counting\" is changed to \"countingy\", but the word \"countingy\" is not an actual word.\n(B) \"counting cows\": Here the word \"crows\" is changed to \"cows\", and this is indeed a playful and meaningful edit that ruins the original name of the band.\n(C) \"courting crows\": Here the word \"counting\" is changed to \"courting\", and \"courting\" is an actual word; however, \"courting crows\" does not sound as humorous as \"counting cows\".\n(D) \"coutnting crows\": Here the word \"counting\" is changed to \"coutnting\", but the word \"coutnting\" is not an actual word.\nAbove the above, the only humorous edit is (B). So the answer is (B).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml new file mode 100644 index 00000000..2aa42072 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml @@ -0,0 +1,5 @@ +"dataset_name": "salient_translation_error_detection" +"description": "Detect the type of error in an English translation of a German source sentence.\n\n" +"doc_to_text": " The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: In der Liste der Baudenkmale in Lenzen (Elbe) sind alle Baudenkmale der brandenburgischen Stadt Lenzen (Elbe) und ihrer Ortsteile aufgelistet.\nTranslation: In the list of architectural monuments in Lenzen all architectural monuments of the Brandenburg city of Lenzen and its districts are listed.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: Let's think step by step.\nWe solve this question by first translating the source sentence to English and then by comparing our translation with the provided translation. According to Google Translate, the correct translation of the source sentence from German to English is \"The list of monuments in Lenzen (Elbe) includes all the monuments in the Brandenburg town of Lenzen (Elbe) and its districts.\" On the other hand, the provided translation is \"In the list of architectural monuments in Lenzen all architectural monuments of the Brandenburg city of Lenzen and its districts are listed.\" Note that Lenzen (Elbe) is changed to Lenzen in the original translation; so, there is a named entity error. Because an entity in the original source sentence is changed to a different entity in the translation, the translation contains an error pertaining to Named Entities. So the answer is (D). 
The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Auf dieser Seite sind die Baudenkmäler der oberbayerischen Großen Kreisstadt Landsberg am Lech zusammengestellt.\nTranslation: On this page are compiled the architectural monuments of the town of Landsberg am Lech.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: Let's think step by step.\nWe solve this question by first translating the source sentence to English and then by comparing our translation with the provided translation. According to Google Translate, the correct translation of the source sentence from German to English is \"The monuments of the Upper Bavarian district town of Landsberg am Lech are compiled on this page.\" On the other hand, the provided translation is \"On this page are compiled the architectural monuments of the town of Landsberg am Lech.\" Note that an important detail about the location of Landsberg am Lech is omitted in the original translation: The translation should have said \"Upper Bavarian district town of Landsberg am Lech\". Because a significant clause in the translation was removed, the translation contains an error pertaining to Dropped Content. So the answer is (E). The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Łeba ist eine Kleinstadt und ein Badeort im Powiat Lęborski der polnischen Woiwodschaft Pommern.\nTranslation: Eba is not a small town and seaside resort in the Powiat Léborski county of the Pomeranian Voivodeship of Poland.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: Let's think step by step.\nWe solve this question by first translating the source sentence to English and then by comparing our translation with the provided translation. 
According to Google Translate, the correct translation of the source sentence from German to English is \"Łeba is a small town and seaside resort in the Powiat Lęborski of the Polish Pomeranian Voivodeship.\" On the other hand, the provided translation is \"Łeba is not a small town and seaside resort in the Powiat Léborski county of the Pomeranian Voivodeship of Poland.\" Note that the provided sentence says, \"Łeba is not a small town ...\" However, the translation should have been \"Łeba is a small town ...\" Because a negation is introduced at the beginning of the sentence and has fundamentally changed the meaning of the original source, the translation contains an error pertaining to Negation or Antonyms. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml new file mode 100644 index 00000000..0138faa7 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml @@ -0,0 +1,5 @@ +"dataset_name": "snarks" +"description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" +"doc_to_text": " Which statement is sarcastic?\nOptions:\n(A) Yes, because having interests and actively researching them is a huge waste\n(B) Yes, because having interests and actively researching them is a huge deal\nA: Let's think step by step.\nIf we look at (A), it says that having interests and actively researching them is a huge waste, implying that it is a useless effort. However, we know that having interests and actively researching them is typically not a waste but rather is beneficial to the individual. The presence of such a juxtaposition in (A) suggests that it contains a taste of irony and sarcasm.\nIf we look at (B), it says that having interests and actively researching them is a huge deal, implying that it is an important and consequential effort. This is arguably a neutral and correct statement.\nAbove the above, the sarcastic option is (A). So the answer is (A). Which statement is sarcastic?\nOptions:\n(A) No one is going to disagree with you on this. Avoiding ad hominem attacks really help your case\n(B) No one is going to disagree with you on this. Ad hominem attacks really help your case\nA: Let's think step by step.\nIf we look at (A), it says that avoiding ad hominem attacks really help your case, implying that ad hominem attacks are adverse and injurious. Because ad hominem attacks are adressed at a person rather than an idea, it is indeed true that avoiding them is often useful and helpful; so, (A) is a neutral (valid and agreeable) statement.\nIf we look at (B), it says that ad hominem attacks really help your case, implying that ad hominem attacks are a positive thing. However, we stated previously that ad hominem attacks are often not useful or constructive. The speaker in this sentence therefore seems to mean the opposite of what they are saying; so, there appears to have a taste of irony and sarcasm in (B).\nAbove the above, the sarcastic option is (B). So the answer is (B). 
Which statement is sarcastic?\nOptions:\n(A) Consistency in the league's punishments? What do you think this is supposed to be, politics?\n(B) Consistency in the league's punishments? What do you think this is supposed to be, moral?\nA: Let's think step by step.\nIf we look at (A), it likens the consistency in the league's punishments with that in politics. Because politics or political affairs are often not considered to be consistent or dependable, this sentence appears to be satirical.\nIf we look at (B), it likens the consistency in the league's punishments with that in morality. Discussing the consistency of the league's punishments in the context of morality, ethics, or law makes sense and does not appear to make a satirical point about anything.\nAbove the above, the sarcastic option is (A). So the answer is (A).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml new file mode 100644 index 00000000..90c0f191 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "sports_understanding" +"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" +"doc_to_text": " Is the following sentence plausible? \"Bam Adebayo scored a reverse layup in the Western Conference Finals.\"\nA: Let's think step by step. Bam Adebayo is an American basketball player. Scoring a reverse layup in the Western Conference Finals is part of the NBA Finals. So the answer is yes. Is the following sentence plausible? \"Santi Cazorla scored a touchdown.\"\nA: Let's think step by step. Santi Cazorla is a soccer player. Touchdown is part of American football and rugby. So the answer is no. Is the following sentence plausible? \"DeMar DeRozan was called for the goal tend.\"\nA: Let's think step by step. DeMar DeRozan is an American basketball player. Goal tending is part of basketball. So the answer is yes.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml new file mode 100644 index 00000000..cff56746 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml @@ -0,0 +1,5 @@ +"dataset_name": "temporal_sequences" +"description": "Task description: Answer questions about which times certain events could have occurred.\n\n" +"doc_to_text": " Today, Emily went to the museum. Between what times could they have gone?\nWe know that:\nEmily woke up at 1pm.\nElizabeth saw Emily reading at the library from 2pm to 4pm.\nJessica saw Emily watching a movie at the theater from 4pm to 5pm.\nLeslie saw Emily waiting at the airport from 5pm to 6pm.\nWilliam saw Emily buying clothes at the mall from 6pm to 7pm.\nThe museum was closed after 7pm.\nBetween what times could Emily have gone to the museum?\nOptions:\n(A) 1pm to 2pm\n(B) 6pm to 7pm\n(C) 5pm to 6pm\n(D) 2pm to 4pm\nA: Let's think step by step.\nWake-up time: 1pm.\n1pm-2pm: free.\n2pm-4pm: reading at the library.\n4pm-5pm: watching a movie at the theater.\n5pm-6pm: waiting at the airport.\n6pm-7pm: buying clothes at the mall.\nThe museum closure time: 7pm.\nThe only time when Emily could have gone to the museum was 1pm to 2pm. 
So the answer is (A). Today, Elizabeth went to the amusement park. Between what times could they have gone?\nWe know that:\nElizabeth woke up at 7am.\nDavid saw Elizabeth fixing their computer at the electronic store from 1pm to 2pm.\nSarah saw Elizabeth playing tennis at the tennis court from 2pm to 3pm.\nSusan saw Elizabeth walking towards the Statue of Liberty from 3pm to 6pm.\nAndrew saw Elizabeth taking photos near the Eiffel Tower from 6pm to 9pm.\nEmily saw Elizabeth getting a coffee at the cafe from 9pm to 10pm.\nThe amusement park was closed after 10pm.\nBetween what times could Elizabeth have gone to the amusement park?\nOptions:\n(A) 7am to 1pm\n(B) 9pm to 10pm\n(C) 1pm to 2pm\n(D) 3pm to 6pm\nA: Let's think step by step.\nWake-up time: 7am.\n7am-1pm: free.\n1pm-2pm: fixing their computer at the electronic store.\n2pm-3pm: playing tennis at the tennis court.\n3pm-6pm: walking towards the Statue of Liberty.\n6pm-9pm: taking photos near the Eiffel Tower.\n9pm-10pm: getting a coffee at the cafe.\nThe amusement park closure time: 10pm.\nThe only time when Elizabeth could have gone to the amusement park was 7am to 1pm. So the answer is (A). Today, Tiffany went to the beach. Between what times could they have gone?\nWe know that:\nTiffany woke up at 5am.\nBetty saw Tiffany getting a coffee at the cafe from 5am to 6am.\nJessica saw Tiffany working at the office from 6am to 9am.\nJohn saw Tiffany stretching at a yoga studio from 9am to 12pm.\nSean saw Tiffany sitting on a rooftop from 12pm to 2pm.\nSarah saw Tiffany playing tennis at the tennis court from 2pm to 3pm.\nThe beach was closed after 4pm.\nBetween what times could Tiffany have gone to the beach?\nOptions:\n(A) 9am to 12pm\n(B) 12pm to 2pm\n(C) 5am to 6am\n(D) 3pm to 4pm\nA: Let's think step by step.\nWake-up time: 5am.\n5am-6am: getting a coffee at the cafe.\n6am-9am: working at the office.\n9am-12pm: stretching at a yoga studio.\n12pm-2pm: sitting on a rooftop.\n2pm-3pm: playing tennis at the tennis court.\n3pm-4pm: free.\nThe beach closure time: 4pm.\nThe only time when Tiffany could have gone to the beach was 3pm to 4pm. So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml new file mode 100644 index 00000000..a4fd3995 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_five_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": " Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. 
At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A). Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C). Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml new file mode 100644 index 00000000..28ff5389 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_seven_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": " Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. 
At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A). Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C). Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml new file mode 100644 index 00000000..84526656 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_three_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": " Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. 
At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A). Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C). Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml new file mode 100644 index 00000000..69857561 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "web_of_lies" +"description": "Evaluate a random boolean function expressed as a word problem.\n\n" +"doc_to_text": " Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent lies. Does Raymond tell the truth?\nA: Let's think step by step.\n(1) Fidel tells the truth. So, we know that Fidel tells the truth.\n(2) Jerry says Fidel tells the truth. Since we know from (1) that Fidel tells the truth, if Jerry says that Fidel tells the truth, then Jerry tells the truth.\n(3) Vina says Jerry tells the truth. Since we know from (2) that Jerry tells the truth, if Vina says Jerry tells the truth, then Vina tells the truth.\n(4) Millicent says Vina lies. Since we know from (3) that Vina tells the truth, if Millicent says Vina lies, then Millicent lies.\n(5) Raymond says Millicent lies. 
Since we know from (4) that Millicent lies, if Raymond says Millicent lies, then Raymond tells the truth.\nNow, the question asks: Does Raymond tell the truth? We know from (5) that Raymond tells the truth. So the answer is Yes. Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell the truth?\nA: Let's think step by step.\n(1) Kristian lies. So, we know that Kristian lies.\n(2) Millie says Kristian lies. Since we know from (1) that Kristian lies, if Millie says Kristian lies, then Millie tells the truth.\n(3) Maybelle says Millie tells the truth. Since we know from (2) that Millie tells the truth, if Maybelle says Millie tells the truth, then Maybelle tells the truth.\n(4) Fidel says Maybelle lies. Since we know from (3) that Maybelle tells the truth, if Fidel says Maybelle lies, then Fidel lies.\n(5) Leda says Fidel lies. Since we know from (4) that Fidel lies, if Leda says Fidel lies, then Leda tells the truth.\nNow, the question asks: Does Leda tell the truth? We know from (5) that Leda tells the truth. So the answer is Yes. Question: Kristian tells the truth. Michaela says Kristian lies. Raymond says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says Osvaldo tells the truth. Does Jamey tell the truth?\nA: Let's think step by step.\n(1) Kristian tells the truth. So, we know that Kristian tells the truth.\n(2) Michaela says Kristian lies. Since we know from (1) that Kristian tells the truth, if Michaela says Kristian lies, then Michaela lies.\n(3) Raymond says Michaela tells the truth. Since we know from (2) that Michaela lies, if Raymond says Michaela tells the truth, then Raymond lies.\n(4) Osvaldo says Raymond tells the truth. Since we know from (3) that Raymond lies, if Osvaldo says Raymond tells the truth, then Osvaldo lies.\n(5) Jamey says Osvaldo tells the truth. Since we know from (4) that Osvaldo lies, if Jamey says Osvaldo tells the truth, then Jamey lies.\nNow, the question asks: Does Jamey tell the truth? We know from (5) that Jamey lies. So the answer is No.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml new file mode 100644 index 00000000..0e2e1a88 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "word_sorting" +"description": "Sort a list of words.\n\n" +"doc_to_text": " Sort the following words alphabetically: List: oven costume counterpart\nA: Let's think step by step.\nThe first letter: \"oven\": \"o\" (15). \"costume\": \"c\" (3). \"counterpart\": \"c\" (3). We now have: (3) [\"costume\" ? \"counterpart\"] < (15) \"oven\". Now let's sort this subpart [\"costume\" ? \"counterpart\"] by looking at their second letters.\nThe second letter: \"costume\": \"o\" (15). \"counterpart\": \"o\" (15). We now have: (15) [\"costume\" ? \"counterpart\"]. Now let's sort this subpart [\"costume\" ? \"counterpart\"] by looking at their third letters.\nThe third letter: \"costume\": \"s\" (19). \"counterpart\": \"u\" (21). We now have: (19) \"costume\" < (21) \"counterpart\". Hence, we have [\"costume\" < \"counterpart\"] < \"oven\". So the answer is costume counterpart oven. 
Sort the following words alphabetically: List: hypochlorite ponderosa phone credulity\nA: Let's think step by step.\nThe first letter: \"hypochlorite\": \"h\" (8). \"ponderosa\": \"p\" (16). \"phone\": \"p\" (16). \"credulity\": \"c\" (3). We now have: (3) \"credulity\" < (8) \"hypochlorite\" < (16) [\"ponderosa\" ? \"phone\"]. Now let's sort this subpart [\"ponderosa\" ? \"phone\"] by looking at their second letters.\nThe second letter: \"ponderosa\": \"o\" (15). \"phone\": \"h\" (8). We now have: (8) \"phone\" < (15) \"ponderosa\". Hence, we have \"credulity\" < \"hypochlorite\" < [\"phone\" <\"ponderosa\"]. So the answer is credulity hypochlorite phone ponderosa. Sort the following words alphabetically: List: newt arson parthia seismography mugho aspect census\nA: Let's think step by step.\nThe first letter: \"newt\": \"n\" (14). \"arson\": \"a\" (1). \"parthia\": \"p\" (16). \"seismography\": \"s\" (19). \"mugho\": \"m\" (13). \"aspect\": \"a\" (1). \"census\": \"c\" (3). We now have: (1) [\"arson\" ? \"aspect\"] < (3) \"census\" < (13) \"mugho\" < (14) \"newt\" < (16) \"parthia\" < (19) \"seismography\". Now let's sort this subpart [\"arson\" ? \"aspect\"] by looking at their second letters.\nThe second letter: \"arson\": \"r\" (18). \"aspect\": \"s\" (19). We now have: (18) \"arson\" < (19) \"aspect\". Hence, we have [\"arson\" < \"aspect\"] < \"census\" < \"mugho\" < \"newt\" < \"parthia\" < \"seismography\". So the answer is arson aspect census mugho newt parthia seismography.Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_fewshot_word_sorting" -- GitLab From c06b0d6efb48abe7f171d283ffad765ee204f72a Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:40:18 +0000 Subject: [PATCH 102/264] add flan_cot_zeroshot --- .../_flan_cot_zeroshot_template_yaml | 22 +++++++++++++++++++ .../boolean_expressions.yaml | 5 +++++ .../flan_cot_zeroshot/causal_judgement.yaml | 5 +++++ .../flan_cot_zeroshot/date_understanding.yaml | 5 +++++ .../flan_cot_zeroshot/disambiguation_qa.yaml | 5 +++++ .../bbh/flan_cot_zeroshot/dyck_languages.yaml | 5 +++++ .../flan_cot_zeroshot/formal_fallacies.yaml | 5 +++++ .../flan_cot_zeroshot/geometric_shapes.yaml | 5 +++++ .../bbh/flan_cot_zeroshot/hyperbaton.yaml | 5 +++++ .../logical_deduction_five_objects.yaml | 5 +++++ .../logical_deduction_seven_objects.yaml | 5 +++++ .../logical_deduction_three_objects.yaml | 5 +++++ .../movie_recommendation.yaml | 5 +++++ .../multistep_arithmetic_two.yaml | 5 +++++ .../tasks/bbh/flan_cot_zeroshot/navigate.yaml | 5 +++++ .../flan_cot_zeroshot/object_counting.yaml | 5 +++++ .../penguins_in_a_table.yaml | 5 +++++ .../reasoning_about_colored_objects.yaml | 5 +++++ .../bbh/flan_cot_zeroshot/ruin_names.yaml | 5 +++++ .../salient_translation_error_detection.yaml | 5 +++++ .../tasks/bbh/flan_cot_zeroshot/snarks.yaml | 5 +++++ .../sports_understanding.yaml | 5 +++++ .../flan_cot_zeroshot/temporal_sequences.yaml | 5 +++++ ...racking_shuffled_objects_five_objects.yaml | 5 +++++ ...acking_shuffled_objects_seven_objects.yaml | 5 +++++ ...acking_shuffled_objects_three_objects.yaml | 5 +++++ .../bbh/flan_cot_zeroshot/web_of_lies.yaml | 5 +++++ .../bbh/flan_cot_zeroshot/word_sorting.yaml | 5 +++++ 28 files changed, 157 insertions(+) create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml 
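[Editor's note: the word_sorting walkthrough just above ranks words letter by letter using 1-based alphabet indices. A quick Python check of that procedure, illustrative only and not part of the patch:

# 1-based alphabet index, as the chain of thought narrates: "c" -> 3, "o" -> 15
def alpha_index(ch: str) -> int:
    return ord(ch.lower()) - ord("a") + 1

words = "oven costume counterpart".split()
# lexicographic sort compares letter by letter, exactly like the walkthrough
print(sorted(words))                       # ['costume', 'counterpart', 'oven']
print([alpha_index(w[0]) for w in words])  # [15, 3, 3]

Python's built-in string comparison already performs the same letter-by-letter tie-breaking the prompt spells out, so sorted() reproduces the target answers directly.]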
create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml create mode 100644 lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml new file mode 100644 index 00000000..0f6fc880 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml @@ -0,0 +1,22 @@ +group: bbh_flan_zeroshot +dataset_path: lukaemon/bbh +output_type: greedy_until +test_split: test +doc_to_target: "{{target}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml new file mode 100644 index 00000000..04a6d1e5 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml @@ -0,0 +1,5 @@ +"dataset_name": "boolean_expressions" +"description": "Evaluate the result of a random Boolean expression.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_boolean_expressions" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml new file mode 100644 
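[Editor's note: the "get-answer" filter in _flan_cot_zeroshot_template_yaml is what turns a free-form chain-of-thought generation back into a scoreable answer. A minimal sketch of what the regex plus take_first pair amounts to, assuming Python re semantics over the raw model output; the helper name is illustrative, not the harness API:

import re

# pattern copied from the filter_list above
ANSWER_RE = re.compile(r"(?<=The answer is )(.*)(?=.)")

def get_answer(generation: str) -> str:
    matches = ANSWER_RE.findall(generation)
    # the unescaped trailing (?=.) forces the greedy .* to stop one character
    # short, so the final character of the line (typically a period) is dropped
    return matches[0] if matches else generation  # take_first keeps hit #1

print(get_answer("Let's think step by step. ... The answer is (A)."))  # -> (A)

Note the lookbehind is case-sensitive, so only continuations that literally contain "The answer is " are captured.]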
index 00000000..73ed31c8 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml @@ -0,0 +1,5 @@ +"dataset_name": "causal_judgement" +"description": "Answer questions about causal attribution.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_causal_judgement" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml new file mode 100644 index 00000000..11ad75db --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "date_understanding" +"description": "Infer the date from context.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_date_understanding" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml new file mode 100644 index 00000000..b8b13d78 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml @@ -0,0 +1,5 @@ +"dataset_name": "disambiguation_qa" +"description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_disambiguation_qa" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml new file mode 100644 index 00000000..5995b6c4 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml @@ -0,0 +1,5 @@ +"dataset_name": "dyck_languages" +"description": "Correctly close a Dyck-n word.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_dyck_languages" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml new file mode 100644 index 00000000..6b029e7e --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "formal_fallacies" +"description": "Distinguish deductively valid arguments from formal fallacies.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_formal_fallacies" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml new file mode 100644 index 00000000..acb91aa4 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml @@ -0,0 +1,5 @@ +"dataset_name": "geometric_shapes" +"description": "Name geometric shapes from their SVG paths.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_geometric_shapes" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml new file mode 100644 index 00000000..dbe1280b --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml @@ -0,0 +1,5 @@ +"dataset_name": "hyperbaton" +"description": "Order adjectives correctly in English sentences.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_hyperbaton" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml 
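[Editor's note: each of these per-task files carries only dataset_name, description, doc_to_text, and task on top of include: _template_yaml. A sketch of the composition this relies on, assuming the harness loads the included template first and lets the including file's keys win; the plain dict-merge below is an assumption, not the harness loader:

import yaml  # pip install pyyaml

with open("_flan_cot_zeroshot_template_yaml") as f:
    template = yaml.safe_load(f)
with open("hyperbaton.yaml") as f:
    task = yaml.safe_load(f)

task.pop("include", None)      # consumed during loading, not kept in the config
config = {**template, **task}  # per-task keys override the shared template
print(config["task"])          # bbh_flan_cot_zeroshot_hyperbaton
print(config["output_type"])   # greedy_until, inherited from the template

This keeps the 27 task files down to five lines each while the decoding and metric settings live in one place.]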
b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml new file mode 100644 index 00000000..5592252a --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_five_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_logical_deduction_five_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml new file mode 100644 index 00000000..c85b9d21 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_seven_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml new file mode 100644 index 00000000..e94f8e4a --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_three_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml new file mode 100644 index 00000000..038119fc --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml @@ -0,0 +1,5 @@ +"dataset_name": "movie_recommendation" +"description": "Recommend movies similar to the given list of movies.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml new file mode 100644 index 00000000..c21c1b8c --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml @@ -0,0 +1,5 @@ +"dataset_name": "multistep_arithmetic_two" +"description": "Solve multi-step arithmetic problems.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml new file mode 100644 index 00000000..c8ea0681 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml @@ -0,0 +1,5 @@ +"dataset_name": "navigate" +"description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml 
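[Editor's note: all of these zeroshot tasks inherit output_type: greedy_until with do_sample: false and temperature: 0.0 from the template, i.e. plain greedy decoding up to a stop sequence. Roughly what that corresponds to for a Hugging Face causal LM; a sketch only, the harness's own request path tokenizes and batches differently:

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")  # stand-in model for illustration
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "Q: not ( not True ) is\nA: Let's think step by step.\n"
enc = tok(prompt, return_tensors="pt")
# do_sample=False is greedy decoding, so the temperature setting is inert here
out = model.generate(**enc, do_sample=False, max_new_tokens=64)
print(tok.decode(out[0][enc["input_ids"].shape[1]:], skip_special_tokens=True))
]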
b/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml new file mode 100644 index 00000000..6d6a4721 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "object_counting" +"description": "Questions that involve enumerating objects and asking the model to count them.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml new file mode 100644 index 00000000..c5501700 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml @@ -0,0 +1,5 @@ +"dataset_name": "penguins_in_a_table" +"description": "Answer questions about a table of penguins and their attributes.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml new file mode 100644 index 00000000..26789385 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "reasoning_about_colored_objects" +"description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml new file mode 100644 index 00000000..3289b750 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml @@ -0,0 +1,5 @@ +"dataset_name": "ruin_names" +"description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml new file mode 100644 index 00000000..c8113e62 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml @@ -0,0 +1,5 @@ +"dataset_name": "salient_translation_error_detection" +"description": "Detect the type of error in an English translation of a German source sentence.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml new file mode 100644 index 00000000..b9da41c7 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml @@ -0,0 +1,5 @@ +"dataset_name": "snarks" +"description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": 
"_template_yaml" +"task": "bbh_flan_cot_zeroshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml new file mode 100644 index 00000000..dbf21164 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "sports_understanding" +"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml new file mode 100644 index 00000000..84db7993 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml @@ -0,0 +1,5 @@ +"dataset_name": "temporal_sequences" +"description": "Task description: Answer questions about which times certain events could have occurred.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml new file mode 100644 index 00000000..4b6ec1ad --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_five_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml new file mode 100644 index 00000000..99dbcc33 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_seven_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml new file mode 100644 index 00000000..4f9ff8e7 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_three_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml new file mode 100644 index 
00000000..5304cdfc --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "web_of_lies" +"description": "Evaluate a random boolean function expressed as a word problem.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml new file mode 100644 index 00000000..62f0a6aa --- /dev/null +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "word_sorting" +"description": "Sort a list of words.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" +"include": "_template_yaml" +"task": "bbh_flan_cot_zeroshot_word_sorting" -- GitLab From 3531d9c162b58a92aa441111750b516d52b40a19 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:40:45 +0000 Subject: [PATCH 103/264] add flan_fewshot --- .../bbh/flan_fewshot/_flan_fewshot_template_yaml | 16 ++++++++++++++++ .../bbh/flan_fewshot/boolean_expressions.yaml | 5 +++++ .../tasks/bbh/flan_fewshot/causal_judgement.yaml | 5 +++++ .../bbh/flan_fewshot/date_understanding.yaml | 5 +++++ .../bbh/flan_fewshot/disambiguation_qa.yaml | 5 +++++ .../tasks/bbh/flan_fewshot/dyck_languages.yaml | 5 +++++ .../tasks/bbh/flan_fewshot/formal_fallacies.yaml | 5 +++++ .../tasks/bbh/flan_fewshot/geometric_shapes.yaml | 5 +++++ lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml | 5 +++++ .../logical_deduction_five_objects.yaml | 5 +++++ .../logical_deduction_seven_objects.yaml | 5 +++++ .../logical_deduction_three_objects.yaml | 5 +++++ .../bbh/flan_fewshot/movie_recommendation.yaml | 5 +++++ .../flan_fewshot/multistep_arithmetic_two.yaml | 5 +++++ lm_eval/tasks/bbh/flan_fewshot/navigate.yaml | 5 +++++ .../tasks/bbh/flan_fewshot/object_counting.yaml | 5 +++++ .../bbh/flan_fewshot/penguins_in_a_table.yaml | 5 +++++ .../reasoning_about_colored_objects.yaml | 5 +++++ lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml | 5 +++++ .../salient_translation_error_detection.yaml | 5 +++++ lm_eval/tasks/bbh/flan_fewshot/snarks.yaml | 5 +++++ .../bbh/flan_fewshot/sports_understanding.yaml | 5 +++++ .../bbh/flan_fewshot/temporal_sequences.yaml | 5 +++++ .../tracking_shuffled_objects_five_objects.yaml | 5 +++++ .../tracking_shuffled_objects_seven_objects.yaml | 5 +++++ .../tracking_shuffled_objects_three_objects.yaml | 5 +++++ lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml | 5 +++++ lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml | 5 +++++ 28 files changed, 151 insertions(+) create mode 100644 lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml 
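[Editor's note: both the zeroshot and fewshot templates score with exact_match under ignore_case and ignore_punctuation. A sketch of that comparison, assuming normalization means lowercasing and stripping punctuation before an exact string test; this mirrors, but is not, the harness metric:

import string

_PUNCT = str.maketrans("", "", string.punctuation)

def exact_match(pred: str, target: str) -> float:
    norm = lambda s: s.translate(_PUNCT).lower().strip()
    return float(norm(pred) == norm(target))

print(exact_match("(B).", "(B)"))       # 1.0 -- case and punctuation ignored
print(exact_match("valid", "invalid"))  # 0.0

The leniency matters here because the fewshot variants have no answer-extraction filter, so a prediction like "(B)." must still match a bare "(B)" target.]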
create mode 100644 lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/navigate.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/snarks.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml create mode 100644 lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml diff --git a/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml new file mode 100644 index 00000000..ff53aabc --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml @@ -0,0 +1,16 @@ +group: bbh_flan_fewshot +dataset_path: lukaemon/bbh +output_type: greedy_until +test_split: test +doc_to_target: "{{target}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 diff --git a/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml new file mode 100644 index 00000000..19d24f3f --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml @@ -0,0 +1,5 @@ +"dataset_name": "boolean_expressions" +"description": "Evaluate the result of a random Boolean expression.\n\n" +"doc_to_text": "Q: not ( ( not not True ) ) is\nA: False\n\nQ: True and False and not True and True is\nA: False\n\nQ: not not ( not ( False ) ) is\nA: True\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_boolean_expressions" diff --git a/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml new file mode 100644 index 00000000..b9dd8f6e --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml @@ -0,0 +1,5 @@ +"dataset_name": "causal_judgement" +"description": "Answer questions about causal attribution.\n\n" +"doc_to_text": "Q: How would a typical person answer each of the following questions about causation?\nFrank T., had an ongoing dispute with his neighbor over a stretch of land and one day decided to shoot his neighbor in the body. Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild. Nonetheless, the bullet bounced off a large boulder several feet away and hit the neighbor's body, causing significant injury. Did Frank T. 
intentionally shoot his neighbor in the body?\nOptions:\n- Yes\n- No\nA: No\n\nQ: How would a typical person answer each of the following questions about causation?\nSuzy and Billy are working on a project that is very important for our nation's security. The boss tells them both: \"Be sure that you are here at exactly 9 am. It is absolutely essential that you arrive at that time.\" Both Billy and Suzy arrive at 9 am. As it happens, there was a motion detector installed in the room where they arrived. The motion detector was set up to be triggered if at least one person appeared in the room at the same time. So the motion detector went off. Did Billy cause the motion detector to go off?\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: How would a typical person answer each of the following questions about causation?\nGeorge and his sister Lena reunite at their parents' house for Thanksgiving. Whereas George just got into medical school, Lena is unhappy in her marriage and recently lost her job. Over the course of the day, George and Lena get into a number of heated arguments. Later in the afternoon they play a game of darts. They split the first two games, and the third game is close until the end. Who will win comes down to George's last shot. If he hits a high point region, he wins; if he hits a low point region, Lena wins. George thinks of the difficult time Lena is having, and he really wants to let her win. He aims the dart at the low point region. He sets up his shot and the dart lands in the low point region. After his shot, Lena wins the game and is very happy. Did George hit the low point region intentionally?\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_causal_judgement" diff --git a/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml new file mode 100644 index 00000000..5ed01c22 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "date_understanding" +"description": "Infer the date from context.\n\n" +"doc_to_text": "Q: Today is Christmas Eve of 1937. What is the date 10 days ago in MM/DD/YYYY?\nOptions:\n(A) 12/14/2026\n(B) 12/14/1950\n(C) 12/14/2007\n(D) 12/14/1937\n(E) 07/14/1938\n(F) 12/14/1988\nA: (D)\n\nQ: Tomorrow is 11/12/2019. What is the date one year ago from today in MM/DD/YYYY?\nOptions:\n(A) 09/04/2018\n(B) 11/11/2018\n(C) 08/25/2018\n(D) 11/02/2018\n(E) 11/04/2018\nA: (B)\n\nQ: Jane and John married on Jan 2, 1958. It is their 5-year anniversary today. 
What is the date tomorrow in MM/DD/YYYY?\nOptions:\n(A) 01/11/1961\n(B) 01/03/1963\n(C) 01/18/1961\n(D) 10/14/1960\n(E) 01/03/1982\n(F) 12/03/1960\nA: (B)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_date_understanding" diff --git a/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml new file mode 100644 index 00000000..0c04056f --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml @@ -0,0 +1,5 @@ +"dataset_name": "disambiguation_qa" +"description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n" +"doc_to_text": "Q: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The chief told the counselor that they took the day off.\nOptions:\n(A) The chief took the day off\n(B) The counselor took the day off\n(C) Ambiguous\nA: (A)\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The manager sent a message to the secretary, but he didn't reply yet.\nOptions:\n(A) The secretary didn't reply yet\n(B) The manager didn't reply yet\n(C) Ambiguous\nA: (A)\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: Bailey will plan to meet the director at his office\nOptions:\n(A) It will be Bailey's office\n(B) It will be the director's office\n(C) Ambiguous\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_disambiguation_qa" diff --git a/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml new file mode 100644 index 00000000..84e308cf --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml @@ -0,0 +1,5 @@ +"dataset_name": "dyck_languages" +"description": "Correctly close a Dyck-n word.\n\n" +"doc_to_text": "Q: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: [ { [\nA: ] } ]\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]\nA: ] ) )\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < > ] > }\nA: } ] >\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_dyck_languages" diff --git a/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml new file mode 100644 index 00000000..c91769a5 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "formal_fallacies" +"description": "Distinguish deductively valid arguments from formal fallacies.\n\n" +"doc_to_text": "Q: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: To begin with, Lesley is a close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy. It follows that Lesley is a great-grandfather of Leroy.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: valid\n\nQ: \"It is not always easy to see who is related to whom -- and in which ways. 
The following argument pertains to this question: Whoever is not a great-grandfather of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde. We may conclude: Everyone who is an ancestor of Dana is a stepbrother of Brian, too.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: valid\n\nQ: \"It is not always easy to grasp who is consuming which products. The following argument pertains to this question: Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and, in the same time, a loyal buyer of Caress soap. It follows that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: invalid\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_formal_fallacies" diff --git a/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml new file mode 100644 index 00000000..1dbb242f --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml @@ -0,0 +1,5 @@ +"dataset_name": "geometric_shapes" +"description": "Name geometric shapes from their SVG paths.\n\n" +"doc_to_text": "Q: This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: (F)\n\nQ: This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: (G)\n\nQ: This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: (D)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_geometric_shapes" diff --git a/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml new file mode 100644 index 00000000..090865b2 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml @@ -0,0 +1,5 @@ +"dataset_name": "hyperbaton" +"description": "Order adjectives correctly in English sentences.\n\n" +"doc_to_text": "Q: Which sentence has the correct adjective order:\nOptions:\n(A) rubber terrible ship\n(B) terrible rubber ship\nA: (B)\n\nQ: Which sentence has the correct adjective order:\nOptions:\n(A) repulsive small Brazilian exercise ship\n(B) Brazilian repulsive exercise small ship\nA: (A)\n\nQ: Which sentence has the correct adjective order:\nOptions:\n(A) blue gold wonderful square shoe\n(B) wonderful square blue gold shoe\nA: (B)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_hyperbaton" diff --git a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml new file mode 100644 index 00000000..67f2c1a7 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_five_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: The following paragraphs each describe a set of three objects 
arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_logical_deduction_five_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml new file mode 100644 index 00000000..47593a0d --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_seven_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. 
The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml new file mode 100644 index 00000000..7264e653 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_three_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. 
The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml new file mode 100644 index 00000000..8fb208b0 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml @@ -0,0 +1,5 @@ +"dataset_name": "movie_recommendation" +"description": "Recommend movies similar to the given list of movies.\n\n" +"doc_to_text": "Q: Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big Lebowski:\nOptions:\n(A) Tetsuo\n(B) the Ironman\n(C) The Princess Bride\n(D) The Barkley Marathons The Race That Eats Its Young\n(E) Bug\nA: (C)\n\nQ: Find a movie similar to Twister, The Silence of the Lambs, Independence Day, Braveheart:\nOptions:\n(A) They Shoot Horses\n(B) Don't They\n(C) Forrest Gump\n(D) The Salton Sea\n(E) Extreme Days\nA: (C)\n\nQ: Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest Gump:\nOptions:\n(A) Phenomena\n(B) Lilting\n(C) Catwoman\n(D) Edge of Tomorrow\nA: (D)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml new file mode 100644 index 00000000..ba5f65ca --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml @@ -0,0 +1,5 @@ +"dataset_name": "multistep_arithmetic_two" +"description": "Solve multi-step arithmetic problems.\n\n" +"doc_to_text": "Q: ((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5)) =\nA: 123\n\nQ: ((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4)) =\nA: 3929\n\nQ: ((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9)) =\nA: -219\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml b/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml new file mode 100644 index 00000000..0aba5820 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml @@ -0,0 +1,5 @@ +"dataset_name": "navigate" +"description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" +"doc_to_text": "Q: If you follow these instructions, do you return to the starting point? Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps. Take 8 steps.\nOptions:\n- Yes\n- No\nA: No\n\nQ: If you follow these instructions, do you return to the starting point? Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps. Take 1 step.\nOptions:\n- Yes\n- No\nA: No\n\nQ: If you follow these instructions, do you return to the starting point? Always face forward. Take 2 steps right. Take 9 steps left. 
Take 7 steps right.\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml new file mode 100644 index 00000000..7aa27a38 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "object_counting" +"description": "Questions that involve enumerating objects and asking the model to count them.\n\n" +"doc_to_text": "Q: I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana, a flute, an orange, and a violin. How many fruits do I have?\nA: 6\n\nQ: I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape, a nectarine, and three plums. How many fruits do I have?\nA: 11\n\nQ: I have a lettuce head, a head of broccoli, an onion, a stalk of celery, two carrots, a garlic, and a yam. How many vegetables do I have?\nA: 8\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml new file mode 100644 index 00000000..f91d9c98 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml @@ -0,0 +1,5 @@ +"dataset_name": "penguins_in_a_table" +"description": "Answer questions about a table of penguins and their attributes.\n\n" +"doc_to_text": "Q: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add a penguin to the table:\nJames, 12, 90, 12\nHow many penguins are less than 8 years old?\nOptions:\n(A) 1\n(B) 2\n(C) 3\n(D) 4\n(E) 5\nA: (B)\n\nQ: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is the youngest penguin?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: (B)\n\nQ: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. What is the name of the second penguin sorted by alphabetic order?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: (D)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml new file mode 100644 index 00000000..d03dcd07 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "reasoning_about_colored_objects" +"description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" +"doc_to_text": "Q: On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. 
What color is the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: (E)\n\nQ: On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: (F)\n\nQ: On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. How many non-orange items do you see to the left of the teal item?\nOptions:\n(A) zero\n(B) one\n(C) two\n(D) three\n(E) four\n(F) five\n(G) six\nA: (A)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml new file mode 100644 index 00000000..d12013db --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml @@ -0,0 +1,5 @@ +"dataset_name": "ruin_names" +"description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" +"doc_to_text": "Q: Which of the following is a humorous edit of this artist or movie name: 'whitesnake'?\nOptions:\n(A) whitesnape\n(B) whitesnapke\n(C) whitesnuake\n(D) mwhitesnake\nA: (A)\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'one of our dinosaurs is missing'?\nOptions:\n(A) ofne of our dinosaurs is missing\n(B) one af our dinosaurs is missing\n(C) one of our dinosaurs is pissing\n(D) one of our dinosaur is missing\nA: (C)\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'counting crows'?\nOptions:\n(A) countingy crows\n(B) counting cows\n(C) courting crows\n(D) coutnting crows\nA: (B)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml new file mode 100644 index 00000000..bfbcfa35 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml @@ -0,0 +1,5 @@ +"dataset_name": "salient_translation_error_detection" +"description": "Detect the type of error in an English translation of a German source sentence.\n\n" +"doc_to_text": "Q: The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. 
Source: In der Liste der Baudenkmale in Lenzen (Elbe) sind alle Baudenkmale der brandenburgischen Stadt Lenzen (Elbe) und ihrer Ortsteile aufgelistet.\nTranslation: In the list of architectural monuments in Lenzen all architectural monuments of the Brandenburg city of Lenzen and its districts are listed.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: (D)\n\nQ: The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Auf dieser Seite sind die Baudenkmäler der oberbayerischen Großen Kreisstadt Landsberg am Lech zusammengestellt.\nTranslation: On this page are compiled the architectural monuments of the town of Landsberg am Lech.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: (E)\n\nQ: The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. 
Source: Łeba ist eine Kleinstadt und ein Badeort im Powiat Lęborski der polnischen Woiwodschaft Pommern.\nTranslation: Eba is not a small town and seaside resort in the Powiat Léborski county of the Pomeranian Voivodeship of Poland.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml b/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml new file mode 100644 index 00000000..375da1a2 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml @@ -0,0 +1,5 @@ +"dataset_name": "snarks" +"description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" +"doc_to_text": "Q: Which statement is sarcastic?\nOptions:\n(A) Yes, because having interests and actively researching them is a huge waste\n(B) Yes, because having interests and actively researching them is a huge deal\nA: (A)\n\nQ: Which statement is sarcastic?\nOptions:\n(A) No one is going to disagree with you on this. Avoiding ad hominem attacks really help your case\n(B) No one is going to disagree with you on this. Ad hominem attacks really help your case\nA: (B)\n\nQ: Which statement is sarcastic?\nOptions:\n(A) Consistency in the league's punishments? What do you think this is supposed to be, politics?\n(B) Consistency in the league's punishments? What do you think this is supposed to be, moral?\nA: (A)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml new file mode 100644 index 00000000..f72cdba9 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "sports_understanding" +"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" +"doc_to_text": "Q: Is the following sentence plausible? \"Bam Adebayo scored a reverse layup in the Western Conference Finals.\"\nA: yes\n\nQ: Is the following sentence plausible? \"Santi Cazorla scored a touchdown.\"\nA: no\n\nQ: Is the following sentence plausible? \"DeMar DeRozan was called for the goal tend.\"\nA: yes\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml new file mode 100644 index 00000000..0f5e5380 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml @@ -0,0 +1,5 @@ +"dataset_name": "temporal_sequences" +"description": "Task description: Answer questions about which times certain events could have occurred.\n\n" +"doc_to_text": "Q: Today, Emily went to the museum. 
Between what times could they have gone?\nWe know that:\nEmily woke up at 1pm.\nElizabeth saw Emily reading at the library from 2pm to 4pm.\nJessica saw Emily watching a movie at the theater from 4pm to 5pm.\nLeslie saw Emily waiting at the airport from 5pm to 6pm.\nWilliam saw Emily buying clothes at the mall from 6pm to 7pm.\nThe museum was closed after 7pm.\nBetween what times could Emily have gone to the museum?\nOptions:\n(A) 1pm to 2pm\n(B) 6pm to 7pm\n(C) 5pm to 6pm\n(D) 2pm to 4pm\nA: (A)\n\nQ: Today, Elizabeth went to the amusement park. Between what times could they have gone?\nWe know that:\nElizabeth woke up at 7am.\nDavid saw Elizabeth fixing their computer at the electronic store from 1pm to 2pm.\nSarah saw Elizabeth playing tennis at the tennis court from 2pm to 3pm.\nSusan saw Elizabeth walking towards the Statue of Liberty from 3pm to 6pm.\nAndrew saw Elizabeth taking photos near the Eiffel Tower from 6pm to 9pm.\nEmily saw Elizabeth getting a coffee at the cafe from 9pm to 10pm.\nThe amusement park was closed after 10pm.\nBetween what times could Elizabeth have gone to the amusement park?\nOptions:\n(A) 7am to 1pm\n(B) 9pm to 10pm\n(C) 1pm to 2pm\n(D) 3pm to 6pm\nA: (A)\n\nQ: Today, Tiffany went to the beach. Between what times could they have gone?\nWe know that:\nTiffany woke up at 5am.\nBetty saw Tiffany getting a coffee at the cafe from 5am to 6am.\nJessica saw Tiffany working at the office from 6am to 9am.\nJohn saw Tiffany stretching at a yoga studio from 9am to 12pm.\nSean saw Tiffany sitting on a rooftop from 12pm to 2pm.\nSarah saw Tiffany playing tennis at the tennis court from 2pm to 3pm.\nThe beach was closed after 4pm.\nBetween what times could Tiffany have gone to the beach?\nOptions:\n(A) 9am to 12pm\n(B) 12pm to 2pm\n(C) 5am to 6am\n(D) 3pm to 4pm\nA: (D)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml new file mode 100644 index 00000000..112ede19 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_five_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: (A)\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: (C)\n\nQ: Alice, Bob, and Claire are dancers at a square dance. 
At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml new file mode 100644 index 00000000..83821c54 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_seven_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: (A)\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: (C)\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml new file mode 100644 index 00000000..afee9bb9 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_three_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. 
At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: (A)\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: (C)\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: (C)\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml new file mode 100644 index 00000000..2f1c5686 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "web_of_lies" +"description": "Evaluate a random boolean function expressed as a word problem.\n\n" +"doc_to_text": "Q: Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent lies. Does Raymond tell the truth?\nA: Yes\n\nQ: Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell the truth?\nA: Yes\n\nQ: Question: Kristian tells the truth. Michaela says Kristian lies. Raymond says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says Osvaldo tells the truth. 
Does Jamey tell the truth?\nA: No\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml new file mode 100644 index 00000000..43b7d43d --- /dev/null +++ b/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "word_sorting" +"description": "Sort a list of words.\n\n" +"doc_to_text": "Q: Sort the following words alphabetically: List: oven costume counterpart\nA: costume counterpart oven\n\nQ: Sort the following words alphabetically: List: hypochlorite ponderosa phone credulity\nA: credulity hypochlorite phone ponderosa\n\nQ: Sort the following words alphabetically: List: newt arson parthia seismography mugho aspect census\nA: arson aspect census mugho newt parthia seismography\n\nQ: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_fewshot_word_sorting" -- GitLab From 0d195e90f2796069f6c1d92e7f19854a66b11245 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:41:10 +0000 Subject: [PATCH 104/264] add flan_zeroshot --- .../flan_zeroshot/_flan_zeroshot_template_yaml | 16 ++++++++++++++++ .../bbh/flan_zeroshot/boolean_expressions.yaml | 5 +++++ .../bbh/flan_zeroshot/causal_judgement.yaml | 5 +++++ .../bbh/flan_zeroshot/date_understanding.yaml | 5 +++++ .../bbh/flan_zeroshot/disambiguation_qa.yaml | 5 +++++ .../tasks/bbh/flan_zeroshot/dyck_languages.yaml | 5 +++++ .../bbh/flan_zeroshot/formal_fallacies.yaml | 5 +++++ .../bbh/flan_zeroshot/geometric_shapes.yaml | 5 +++++ lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml | 5 +++++ .../logical_deduction_five_objects.yaml | 5 +++++ .../logical_deduction_seven_objects.yaml | 5 +++++ .../logical_deduction_three_objects.yaml | 5 +++++ .../bbh/flan_zeroshot/movie_recommendation.yaml | 5 +++++ .../flan_zeroshot/multistep_arithmetic_two.yaml | 5 +++++ lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml | 5 +++++ .../tasks/bbh/flan_zeroshot/object_counting.yaml | 5 +++++ .../bbh/flan_zeroshot/penguins_in_a_table.yaml | 5 +++++ .../reasoning_about_colored_objects.yaml | 5 +++++ lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml | 5 +++++ .../salient_translation_error_detection.yaml | 5 +++++ lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml | 5 +++++ .../bbh/flan_zeroshot/sports_understanding.yaml | 5 +++++ .../bbh/flan_zeroshot/temporal_sequences.yaml | 5 +++++ .../tracking_shuffled_objects_five_objects.yaml | 5 +++++ .../tracking_shuffled_objects_seven_objects.yaml | 5 +++++ .../tracking_shuffled_objects_three_objects.yaml | 5 +++++ lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml | 5 +++++ .../tasks/bbh/flan_zeroshot/word_sorting.yaml | 5 +++++ 28 files changed, 151 insertions(+) create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml create mode 100644 
lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml create mode 100644 lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml diff --git a/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml new file mode 100644 index 00000000..832c728d --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml @@ -0,0 +1,16 @@ +group: bbh_flan_zeroshot +dataset_path: lukaemon/bbh +output_type: greedy_until +test_split: test +doc_to_target: "{{target}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +generation_kwargs: + until: + - "</s>" + do_sample: false + temperature: 0.0 diff --git a/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml new file mode 100644 index 00000000..7098c7e3 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml @@ -0,0 +1,5 @@ +"dataset_name": "boolean_expressions" +"description": "Evaluate the result of a random Boolean expression.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_boolean_expressions" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml new file mode 100644 index 00000000..953419bd --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml @@ -0,0 +1,5 @@ +"dataset_name": "causal_judgement" +"description": "Answer questions about causal attribution.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_causal_judgement" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml new file mode 100644 index 00000000..99255c90 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "date_understanding" +"description": "Infer the date from context.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_date_understanding" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml
b/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml new file mode 100644 index 00000000..65c515cc --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml @@ -0,0 +1,5 @@ +"dataset_name": "disambiguation_qa" +"description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_disambiguation_qa" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml new file mode 100644 index 00000000..10b87a70 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml @@ -0,0 +1,5 @@ +"dataset_name": "dyck_languages" +"description": "Correctly close a Dyck-n word.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_dyck_languages" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml new file mode 100644 index 00000000..7fcf6920 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "formal_fallacies" +"description": "Distinguish deductively valid arguments from formal fallacies.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_formal_fallacies" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml new file mode 100644 index 00000000..ee6082b9 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml @@ -0,0 +1,5 @@ +"dataset_name": "geometric_shapes" +"description": "Name geometric shapes from their SVG paths.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_geometric_shapes" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml new file mode 100644 index 00000000..3e82c854 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml @@ -0,0 +1,5 @@ +"dataset_name": "hyperbaton" +"description": "Order adjectives correctly in English sentences.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_hyperbaton" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml new file mode 100644 index 00000000..1e4adeb4 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_five_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_logical_deduction_five_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml new file mode 100644 index 00000000..910ca139 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_seven_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml 
b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml new file mode 100644 index 00000000..405cf023 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "logical_deduction_three_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml new file mode 100644 index 00000000..54dc45f3 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml @@ -0,0 +1,5 @@ +"dataset_name": "movie_recommendation" +"description": "Recommend movies similar to the given list of movies.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml new file mode 100644 index 00000000..494b94fe --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml @@ -0,0 +1,5 @@ +"dataset_name": "multistep_arithmetic_two" +"description": "Solve multi-step arithmetic problems.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml b/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml new file mode 100644 index 00000000..3f107003 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml @@ -0,0 +1,5 @@ +"dataset_name": "navigate" +"description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml new file mode 100644 index 00000000..8e1a675e --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "object_counting" +"description": "Questions that involve enumerating objects and asking the model to count them.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml new file mode 100644 index 00000000..c3c4138c --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml @@ -0,0 +1,5 @@ +"dataset_name": "penguins_in_a_table" +"description": "Answer questions about a table of penguins and their attributes.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml new file mode 100644 index 00000000..bbe01119 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "reasoning_about_colored_objects" +"description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": 
"bbh_flan_zeroshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml new file mode 100644 index 00000000..b43e9414 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml @@ -0,0 +1,5 @@ +"dataset_name": "ruin_names" +"description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml new file mode 100644 index 00000000..43ebe9f0 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml @@ -0,0 +1,5 @@ +"dataset_name": "salient_translation_error_detection" +"description": "Detect the type of error in an English translation of a German source sentence.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml b/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml new file mode 100644 index 00000000..df46e580 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml @@ -0,0 +1,5 @@ +"dataset_name": "snarks" +"description": "Determine which of two sentences is sarcastic.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml new file mode 100644 index 00000000..fdbc3287 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml @@ -0,0 +1,5 @@ +"dataset_name": "sports_understanding" +"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml new file mode 100644 index 00000000..4a526778 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml @@ -0,0 +1,5 @@ +"dataset_name": "temporal_sequences" +"description": "Task description: Answer questions about which times certain events could have occurred.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml new file mode 100644 index 00000000..39d96c56 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_five_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml new file mode 100644 index 00000000..c1f42e8f --- /dev/null +++ 
b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_seven_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml new file mode 100644 index 00000000..0e02323d --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml @@ -0,0 +1,5 @@ +"dataset_name": "tracking_shuffled_objects_three_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml new file mode 100644 index 00000000..179aab6a --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "web_of_lies" +"description": "Evaluate a random boolean function expressed as a word problem.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml new file mode 100644 index 00000000..9317b875 --- /dev/null +++ b/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "word_sorting" +"description": "Sort a list of words.\n\n" +"doc_to_text": "Q: {{input}}\nA:" +"include": "_template_yaml" +"task": "bbh_flan_zeroshot_word_sorting" -- GitLab From 86e785899d703bd2edc37df2e7e872220f3fb2cf Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:41:57 +0000 Subject: [PATCH 105/264] modified changes to fix loglikelihood prediction for seq2seq --- lm_eval/models/huggingface.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 65479d40..4b509352 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -409,12 +409,13 @@ class HFLM(LM): utils.clear_torch_cache() return batch_size - def tok_encode(self, string: str, left_truncate_len=None): + def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None): """ """ - if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: - add_special_tokens = False - elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: - add_special_tokens = True + if add_special_tokens is None: + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + add_special_tokens = False + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + add_special_tokens = True encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens) @@ -529,8 +530,12 @@ class HFLM(LM): if n_spaces > 0: continuation = context[-n_spaces:] + continuation context = context[:-n_spaces] - whole_enc = self.tok_encode(context + continuation) - context_enc = self.tok_encode(context) + + whole_enc = self.tok_encode(context + 
continuation, add_special_tokens=False) + context_enc = self.tok_encode(context, add_special_tokens=False) + + # whole_enc = self.tok_encode(context + continuation) + # context_enc = self.tok_encode(context, add_special_tokens=False) context_enc_len = len(context_enc) continuation_enc = whole_enc[context_enc_len:] return context_enc, continuation_enc -- GitLab From 96d9e38c5e302b5c4dcddae2542a9ec10333f057 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:50:20 +0000 Subject: [PATCH 106/264] update benchmarks --- .../flan/prompt_templates/flan_arc.yaml | 24 +++++++++++++++++++ lm_eval/benchmarks/flan_held_out.yaml | 12 ++++++++-- 2 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml new file mode 100644 index 00000000..c9135a51 --- /dev/null +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml @@ -0,0 +1,24 @@ +# Flan Prompt Templates +prompts: + "template-0": + doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + "template-1": + doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + "template-2": + doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + "template-3": + doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + "template-4": + doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + "template-5": + doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + "template-6": + doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + diff --git a/lm_eval/benchmarks/flan_held_out.yaml b/lm_eval/benchmarks/flan_held_out.yaml index cde82722..f61affea 100644 --- a/lm_eval/benchmarks/flan_held_out.yaml +++ b/lm_eval/benchmarks/flan_held_out.yaml @@ -1,4 +1,12 @@ group: flan_held_out task: - - bbh_flan - - mmlu_flan + # BBH + - bbh_flan_zeroshot + - bbh_flan_fewshot + - bbh_flan_cot_fewshot + - bbh_flan_cot_zeroshot + # MMLU + - mmlu_flan_n_shot_generative + - mmlu_flan_n_shot_loglikelihood + - mmlu_flan_cot_zeroshot + - mmlu_flan_cot_fewshot -- GitLab From c8b76a3dad9fb9c3c5d89db695b2b758e223f31c Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:50:50 +0000 Subject: [PATCH 107/264] edit group name --- .../tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml | 3 +-- .../mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml index 3f649666..c52f8dc7 100644 ---
a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml @@ -1,6 +1,5 @@ -group: mmlu_flan +group: mmlu_flan_n_shot_generative dataset_path: cais/mmlu -# validation_split: validation test_split: test fewshot_split: dev # doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: " diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml index 2a09f787..2162ade8 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml @@ -1,4 +1,4 @@ -group: mmlu_flan_loglikelihood +group: mmlu_flan_n_shot_loglikelihood dataset_path: cais/mmlu # validation_split: validation test_split: test -- GitLab From e795efcfacb1f16e0ba79dee221ff18e44906f52 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 10:51:16 +0000 Subject: [PATCH 108/264] updates --- lm_eval/tasks/bbh/README.md | 2 +- lm_eval/tasks/bbh/_generate_configs.py | 92 +++++++++++++++---- lm_eval/tasks/bbh/_template_yaml | 15 --- .../_flan_cot_fewshot_template_yaml | 2 +- .../_flan_cot_zeroshot_template_yaml | 2 +- 5 files changed, 75 insertions(+), 38 deletions(-) delete mode 100644 lm_eval/tasks/bbh/_template_yaml diff --git a/lm_eval/tasks/bbh/README.md b/lm_eval/tasks/bbh/README.md index 91be60fc..4f8dad49 100644 --- a/lm_eval/tasks/bbh/README.md +++ b/lm_eval/tasks/bbh/README.md @@ -25,7 +25,7 @@ Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard #### Groups -- `bbh` +- `bbh_flan_zeroshot` #### Tasks diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py index 9e603994..8d805a64 100644 --- a/lm_eval/tasks/bbh/_generate_configs.py +++ b/lm_eval/tasks/bbh/_generate_configs.py @@ -1,32 +1,84 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" +import os +import re import yaml -import inspect -import datasets +import requests +import argparse +import datasets from tqdm import tqdm +from lm_eval import utils +from lm_eval.logger import eval_logger + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument( + "--save_prefix_path", default="flan_zeroshot" + ) + parser.add_argument( + "--cot", default=False + ) + parser.add_argument( + "--fewshot", default=False + ) + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + + args = parse_args() -def main() -> None: + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. 
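+ # each task YAML emitted below carries only its per-task overrides (dataset_name,
+ # description, doc_to_text, task) and inherits the rest of its config from the
+ # shared template through that `include` key: the loop fetches the official
+ # BIG-Bench-Hard CoT prompt file for the task, splits off the task description
+ # and its few-shot exemplars, and writes a five-line YAML under --save_prefix_path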
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path) as f: + base_yaml = yaml.full_load(f) + + base_doc_to_text = "Q: {{input}}\nA:" + answer_regex = re.compile("(?<=answer is )(.*)(?=.)") dataset_path = "lukaemon/bbh" for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()): - file_name = f"{task}.yaml" - try: - with open(f"{file_name}", "w") as f: - f.write("# Generated by _generate_configs.py\n") - yaml.dump( - { - "include": "_template_yaml", - "task": f"{dataset_path.split('/')[-1]}_{task}", - "dataset_name": task, - }, - f, - ) - except FileExistsError: - pass + resp = requests.get(f"https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt").content.decode('utf-8') + prompt = resp.split("\n-----\n")[-1] + description, *few_shot = prompt.split("\n\nQ:") + + prefix_doc_to_text = "" + if args.fewshot: + if args.cot: + prefix_doc_to_text = " ".join(few_shot) + else: + for shot in few_shot: + shot = "Q:"+shot + try: + answer = answer_regex.search(shot)[0] + except: + print("task", task) + print(shot) + example = shot.split("Let\'s think step by step.")[0] + prefix_doc_to_text += f"{example}{answer}\n\n" + + doc_to_text = prefix_doc_to_text + base_doc_to_text + if args.cot: + doc_to_text = doc_to_text + " Let's think step by step.\n" + + yaml_dict = { + "include": "_template_yaml", + "task": f"bbh_{args.task_prefix}_{task}", + "dataset_name": task, + "description": description+"\n\n", + "doc_to_text": doc_to_text, + } + + file_save_path = args.save_prefix_path + f"/{task}.yaml" + eval_logger.info(f"Saving yaml for subset {task} to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump(yaml_dict, yaml_file, width=float("inf"), allow_unicode=True, default_style='"') -if __name__ == "__main__": - main() -# https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/boolean_expressions.txt diff --git a/lm_eval/tasks/bbh/_template_yaml b/lm_eval/tasks/bbh/_template_yaml deleted file mode 100644 index 3b174480..00000000 --- a/lm_eval/tasks/bbh/_template_yaml +++ /dev/null @@ -1,15 +0,0 @@ -group: bbh -dataset_path: lukaemon/bbh -output_type: greedy_until -test_split: test -doc_to_text: "Q: {{input}}\nA:" -doc_to_target: "{{target}}" -metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true -generation_kwargs: - until: - - "</s>" - do_sample: false - temperature: 0.0 diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml index d9dbf8c5..e435e57b 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml @@ -1,4 +1,4 @@ -group: bbh_flan_fewshot +group: bbh_flan_cot_fewshot dataset_path: lukaemon/bbh output_type: greedy_until test_split: test diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml index 0f6fc880..f660be72 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml @@ -1,4 +1,4 @@ -group: bbh_flan_zeroshot +group: bbh_flan_cot_zeroshot dataset_path: lukaemon/bbh output_type: greedy_until test_split: test -- GitLab From 03be40e28b3ab9c0ea14ae454b883c08f5957ca0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 11:26:07 +0000 Subject: [PATCH 109/264]
From 03be40e28b3ab9c0ea14ae454b883c08f5957ca0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 11:26:07 +0000 Subject: [PATCH 109/264] update --- lm_eval/tasks/bbh/README.md | 4 +++ .../_flan_cot_fewshot_template_yaml | 4 +-- .../_flan_cot_zeroshot_template_yaml | 4 +-- .../flan_fewshot/_flan_fewshot_template_yaml | 4 +-- .../_flan_zeroshot_template_yaml | 4 +-- lm_eval/tasks/mmlu/_generate_configs.py | 3 +-- .../hendrycks_test_original_default.yaml | 21 --------------- .../_mmlu_flan_cot_fewshot_template_yaml | 27 +++++++++---------- .../_mmlu_flan_generative_template_yaml | 25 +++++++++-------- .../_mmlu_flan_generative_template_yaml | 16 ++++------- .../_mmlu_flan_loglikelihood_template_yaml | 5 ++-- .../flan_n_shot/mmlu_abstract_algebra.yaml | 7 ----- .../tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_astronomy.yaml | 7 ----- .../flan_n_shot/mmlu_business_ethics.yaml | 7 ----- .../flan_n_shot/mmlu_clinical_knowledge.yaml | 7 ----- .../flan_n_shot/mmlu_college_biology.yaml | 7 ----- .../flan_n_shot/mmlu_college_chemistry.yaml | 7 ----- .../mmlu_college_computer_science.yaml | 7 ----- .../flan_n_shot/mmlu_college_mathematics.yaml | 7 ----- .../flan_n_shot/mmlu_college_medicine.yaml | 7 ----- .../flan_n_shot/mmlu_college_physics.yaml | 7 ----- .../flan_n_shot/mmlu_computer_security.yaml | 7 ----- .../flan_n_shot/mmlu_conceptual_physics.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_econometrics.yaml | 7 ----- .../mmlu_electrical_engineering.yaml | 7 ----- .../mmlu_elementary_mathematics.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_formal_logic.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_global_facts.yaml | 7 ----- .../flan_n_shot/mmlu_high_school_biology.yaml | 7 ----- .../mmlu_high_school_chemistry.yaml | 7 ----- .../mmlu_high_school_computer_science.yaml | 7 ----- .../mmlu_high_school_european_history.yaml | 7 ----- .../mmlu_high_school_geography.yaml | 7 ----- ...u_high_school_government_and_politics.yaml | 7 ----- .../mmlu_high_school_macroeconomics.yaml | 7 ----- .../mmlu_high_school_mathematics.yaml | 7 ----- .../mmlu_high_school_microeconomics.yaml | 7 ----- .../flan_n_shot/mmlu_high_school_physics.yaml | 7 ----- .../mmlu_high_school_psychology.yaml | 7 ----- .../mmlu_high_school_statistics.yaml | 7 ----- .../mmlu_high_school_us_history.yaml | 7 ----- .../mmlu_high_school_world_history.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_human_aging.yaml | 7 ----- .../flan_n_shot/mmlu_human_sexuality.yaml | 7 ----- .../flan_n_shot/mmlu_international_law.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_jurisprudence.yaml | 7 ----- .../flan_n_shot/mmlu_logical_fallacies.yaml | 7 ----- .../mmlu_loglikelihood_abstract_algebra.yaml | 7 ----- .../mmlu_loglikelihood_anatomy.yaml | 7 ----- .../mmlu_loglikelihood_astronomy.yaml | 7 ----- .../mmlu_loglikelihood_business_ethics.yaml | 7 ----- ...mmlu_loglikelihood_clinical_knowledge.yaml | 7 ----- .../mmlu_loglikelihood_college_biology.yaml | 7 ----- .../mmlu_loglikelihood_college_chemistry.yaml | 7 ----- ...oglikelihood_college_computer_science.yaml | 7 ----- ...mlu_loglikelihood_college_mathematics.yaml | 7 ----- .../mmlu_loglikelihood_college_medicine.yaml | 7 ----- .../mmlu_loglikelihood_college_physics.yaml | 7 ----- .../mmlu_loglikelihood_computer_security.yaml | 7 ----- ...mmlu_loglikelihood_conceptual_physics.yaml | 7 ----- .../mmlu_loglikelihood_econometrics.yaml | 7 ----- ..._loglikelihood_electrical_engineering.yaml | 7 ----- ..._loglikelihood_elementary_mathematics.yaml | 7 ----- .../mmlu_loglikelihood_formal_logic.yaml | 7 ----- .../mmlu_loglikelihood_global_facts.yaml | 7 ----- ...mlu_loglikelihood_high_school_biology.yaml | 7 -----
...u_loglikelihood_high_school_chemistry.yaml | 7 ----- ...kelihood_high_school_computer_science.yaml | 7 ----- ...kelihood_high_school_european_history.yaml | 7 ----- ...u_loglikelihood_high_school_geography.yaml | 7 ----- ...d_high_school_government_and_politics.yaml | 7 ----- ...likelihood_high_school_macroeconomics.yaml | 7 ----- ...loglikelihood_high_school_mathematics.yaml | 7 ----- ...likelihood_high_school_microeconomics.yaml | 7 ----- ...mlu_loglikelihood_high_school_physics.yaml | 7 ----- ..._loglikelihood_high_school_psychology.yaml | 7 ----- ..._loglikelihood_high_school_statistics.yaml | 7 ----- ..._loglikelihood_high_school_us_history.yaml | 7 ----- ...glikelihood_high_school_world_history.yaml | 7 ----- .../mmlu_loglikelihood_human_aging.yaml | 7 ----- .../mmlu_loglikelihood_human_sexuality.yaml | 7 ----- .../mmlu_loglikelihood_international_law.yaml | 7 ----- .../mmlu_loglikelihood_jurisprudence.yaml | 7 ----- .../mmlu_loglikelihood_logical_fallacies.yaml | 7 ----- .../mmlu_loglikelihood_machine_learning.yaml | 7 ----- .../mmlu_loglikelihood_management.yaml | 7 ----- .../mmlu_loglikelihood_marketing.yaml | 7 ----- .../mmlu_loglikelihood_medical_genetics.yaml | 7 ----- .../mmlu_loglikelihood_miscellaneous.yaml | 7 ----- .../mmlu_loglikelihood_moral_disputes.yaml | 7 ----- .../mmlu_loglikelihood_moral_scenarios.yaml | 7 ----- .../mmlu_loglikelihood_nutrition.yaml | 7 ----- .../mmlu_loglikelihood_philosophy.yaml | 7 ----- .../mmlu_loglikelihood_prehistory.yaml | 7 ----- ...loglikelihood_professional_accounting.yaml | 7 ----- .../mmlu_loglikelihood_professional_law.yaml | 7 ----- ...u_loglikelihood_professional_medicine.yaml | 7 ----- ...loglikelihood_professional_psychology.yaml | 7 ----- .../mmlu_loglikelihood_public_relations.yaml | 7 ----- .../mmlu_loglikelihood_security_studies.yaml | 7 ----- .../mmlu_loglikelihood_sociology.yaml | 7 ----- .../mmlu_loglikelihood_us_foreign_policy.yaml | 7 ----- .../mmlu_loglikelihood_virology.yaml | 7 ----- .../mmlu_loglikelihood_world_religions.yaml | 7 ----- .../flan_n_shot/mmlu_machine_learning.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_management.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_marketing.yaml | 7 ----- .../flan_n_shot/mmlu_medical_genetics.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_miscellaneous.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_moral_disputes.yaml | 7 ----- .../flan_n_shot/mmlu_moral_scenarios.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_nutrition.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_philosophy.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_prehistory.yaml | 7 ----- .../mmlu_professional_accounting.yaml | 7 ----- .../flan_n_shot/mmlu_professional_law.yaml | 7 ----- .../mmlu_professional_medicine.yaml | 7 ----- .../mmlu_professional_psychology.yaml | 7 ----- .../flan_n_shot/mmlu_public_relations.yaml | 7 ----- .../flan_n_shot/mmlu_security_studies.yaml | 7 ----- .../mmlu/flan_n_shot/mmlu_sociology.yaml | 7 ----- .../flan_n_shot/mmlu_us_foreign_policy.yaml | 7 ----- .../tasks/mmlu/flan_n_shot/mmlu_virology.yaml | 7 ----- .../flan_n_shot/mmlu_world_religions.yaml | 7 ----- 125 files changed, 45 insertions(+), 870 deletions(-) delete mode 100644 lm_eval/tasks/mmlu/default/hendrycks_test_original_default.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml delete mode 100644 
lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml 
delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml delete mode 100644 
lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml delete mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml diff --git a/lm_eval/tasks/bbh/README.md b/lm_eval/tasks/bbh/README.md index 4f8dad49..eb3090bd 100644 --- a/lm_eval/tasks/bbh/README.md +++ b/lm_eval/tasks/bbh/README.md @@ -26,6 +26,10 @@ Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard #### Groups - `bbh_flan_zeroshot` +- `bbh_flan_fewshot` +- `bbh_flan_cot_fewshot` +- `bbh_flan_cot_zeroshot` + #### Tasks diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml index e435e57b..34d7f066 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml @@ -7,8 +7,8 @@ metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true + # ignore_case: true + # ignore_punctuation: true generation_kwargs: until: - "" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml index f660be72..bda6eb96 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml 
@@ -7,8 +7,8 @@ metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true + # ignore_case: true + # ignore_punctuation: true generation_kwargs: until: - "" diff --git a/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml index ff53aabc..89e5de29 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/_flan_fewshot_template_yaml @@ -7,8 +7,8 @@ metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true + # ignore_case: true + # ignore_punctuation: true generation_kwargs: until: - "" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml index 832c728d..66dbf369 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/_flan_zeroshot_template_yaml @@ -7,8 +7,8 @@ metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true + # ignore_case: true + # ignore_punctuation: true generation_kwargs: until: - "" diff --git a/lm_eval/tasks/mmlu/_generate_configs.py b/lm_eval/tasks/mmlu/_generate_configs.py index af9bd0c6..36a3936c 100644 --- a/lm_eval/tasks/mmlu/_generate_configs.py +++ b/lm_eval/tasks/mmlu/_generate_configs.py @@ -92,7 +92,6 @@ if __name__ == "__main__": base_yaml_name = os.path.split(args.base_yaml_path)[-1] with open(args.base_yaml_path) as f: base_yaml = yaml.full_load(f) - print(base_yaml) if args.cot_prompt_path is not None: import json @@ -115,4 +114,4 @@ if __name__ == "__main__": file_save_path = args.save_prefix_path + f"_{subject}.yaml" eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") with open(file_save_path, "w") as yaml_file: - yaml.dump(yaml_dict, yaml_file, width=float("inf")) + yaml.dump(yaml_dict, yaml_file, width=float("inf"), allow_unicode=True, default_style='"') diff --git a/lm_eval/tasks/mmlu/default/hendrycks_test_original_default.yaml b/lm_eval/tasks/mmlu/default/hendrycks_test_original_default.yaml deleted file mode 100644 index 248e7561..00000000 --- a/lm_eval/tasks/mmlu/default/hendrycks_test_original_default.yaml +++ /dev/null @@ -1,21 +0,0 @@ -group: - - mmlu - - mmlu_original - - multiple_choice -task: mmlu_original_abstract_algebra -dataset_path: cais/mmlu -dataset_name: abstract_algebra -output_type: multiple_choice -validation_split: validation -test_split: test -description: "The following are multiple choice questions (with answers) about abstract algebra.\n\n" -doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:" -doc_to_choice: ["A", "B", "C", "D"] -doc_to_target: "{{answer}}" -metric_list: - - metric: acc - aggregation: mean - higher_is_better: true - - metric: acc_norm - aggregation: mean - higher_is_better: true diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml index 64587d54..ffa9ee87 100644 --- a/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml +++ b/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml @@ -2,24 +2,23 @@ group: mmlu_flan_cot_fewshot dataset_path: cais/mmlu validation_split: validation fewshot_split: dev -doc_to_text: "\n\nQ: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step." -fewshot_delimiter: "" output_type: greedy_until +doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step." doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" -metric_list: - - metric: exact_match - aggregation: mean - higher_is_better: true - ignore_case: true - ignore_punctuation: true +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" generation_kwargs: until: - "" do_sample: false temperature: 0.0 -filter_list: - - name: "get-answer" - filter: - - function: "regex" - regex_pattern: "(?<=The answer is )(.*)(.)" - - function: "take_first" \ No newline at end of file +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml index e5b8e429..c9b03734 100644 --- a/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml @@ -2,24 +2,23 @@ group: mmlu_flan_cot_zeroshot dataset_path: cais/mmlu validation_split: validation fewshot_split: dev -doc_to_text: "\n\nQ: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step." output_type: greedy_until -fewshot_delimiter: "" +doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step." 
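+# the "get-answer" filter below reduces a CoT completion to whatever follows "The answer is ", e.g. "... The answer is (B)." -> "(B)"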
doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 metric_list: - metric: exact_match aggregation: mean higher_is_better: true ignore_case: true ignore_punctuation: true -generation_kwargs: - until: - - "" - do_sample: false - temperature: 0.0 -filter_list: - - name: "get-answer" - filter: - - function: "regex" - regex_pattern: "(?<=The answer is )(.*)(.)" - - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml index c52f8dc7..b1ff96a8 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml @@ -2,19 +2,13 @@ group: mmlu_flan_n_shot_generative dataset_path: cais/mmlu test_split: test fewshot_split: dev -# doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: " -doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" output_type: greedy_until -# doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" -doc_to_target: "{{['A', 'B', 'C', 'D'][answer]}}" +doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: " +doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}" +generation_kwargs: + until: + - "" metric_list: - metric: exact_match aggregation: mean higher_is_better: true - # ignore_case: true - # ignore_punctuation: true -generation_kwargs: - until: - - "" -# do_sample: false -# temperature: 0.0 \ No newline at end of file diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml index 2162ade8..2d5d92ef 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml @@ -1,11 +1,10 @@ group: mmlu_flan_n_shot_loglikelihood dataset_path: cais/mmlu -# validation_split: validation test_split: test fewshot_split: dev output_type: multiple_choice -doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" -doc_to_choice: ["A", "B", "C", "D"] +doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: " +doc_to_choice: ["(A)", "(B)", "(C)", "(D)"] doc_to_target: answer metric_list: - metric: acc diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml deleted file mode 100644 index 31729f37..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_abstract_algebra.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: abstract_algebra -description: 'The following are multiple choice questions (with answers) about abstract algebra. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_abstract_algebra diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml deleted file mode 100644 index e8978402..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_anatomy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: anatomy -description: 'The following are multiple choice questions (with answers) about anatomy. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_anatomy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml deleted file mode 100644 index 66902758..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_astronomy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: astronomy -description: 'The following are multiple choice questions (with answers) about astronomy. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_astronomy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml deleted file mode 100644 index d1dcf3c7..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_business_ethics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: business_ethics -description: 'The following are multiple choice questions (with answers) about business ethics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_business_ethics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml deleted file mode 100644 index 14b12359..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_clinical_knowledge.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: clinical_knowledge -description: 'The following are multiple choice questions (with answers) about clinical knowledge. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_clinical_knowledge diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml deleted file mode 100644 index 0d202b8e..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_biology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_biology -description: 'The following are multiple choice questions (with answers) about college biology. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_college_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml deleted file mode 100644 index 77f6328f..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_chemistry.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_chemistry -description: 'The following are multiple choice questions (with answers) about college chemistry. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_college_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml deleted file mode 100644 index f5cbda28..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_computer_science.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_computer_science -description: 'The following are multiple choice questions (with answers) about college computer science. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_college_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml deleted file mode 100644 index dbc9be4c..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_mathematics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_mathematics -description: 'The following are multiple choice questions (with answers) about college mathematics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_college_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml deleted file mode 100644 index efc868f0..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_medicine.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_medicine -description: 'The following are multiple choice questions (with answers) about college medicine. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_college_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml deleted file mode 100644 index d92c14ea..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_college_physics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_physics -description: 'The following are multiple choice questions (with answers) about college physics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_college_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml deleted file mode 100644 index 3ddf3ee5..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_computer_security.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: computer_security -description: 'The following are multiple choice questions (with answers) about computer security. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_computer_security diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml deleted file mode 100644 index 7c4f90ed..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_conceptual_physics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: conceptual_physics -description: 'The following are multiple choice questions (with answers) about conceptual physics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_conceptual_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml deleted file mode 100644 index b46c90cb..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_econometrics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: econometrics -description: 'The following are multiple choice questions (with answers) about econometrics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_econometrics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml deleted file mode 100644 index 0308fe16..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_electrical_engineering.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: electrical_engineering -description: 'The following are multiple choice questions (with answers) about electrical engineering. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_electrical_engineering diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml deleted file mode 100644 index 2b8a8caf..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_elementary_mathematics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: elementary_mathematics -description: 'The following are multiple choice questions (with answers) about elementary mathematics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_elementary_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml deleted file mode 100644 index 10f58f41..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_formal_logic.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: formal_logic -description: 'The following are multiple choice questions (with answers) about formal logic. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_formal_logic diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml deleted file mode 100644 index 48816fe3..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_global_facts.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: global_facts -description: 'The following are multiple choice questions (with answers) about global facts. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_global_facts diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml deleted file mode 100644 index ebb1ded2..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_biology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_biology -description: 'The following are multiple choice questions (with answers) about high school biology. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml deleted file mode 100644 index 66a484a3..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_chemistry.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_chemistry -description: 'The following are multiple choice questions (with answers) about high school chemistry. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml deleted file mode 100644 index b9a9060c..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_computer_science.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_computer_science -description: 'The following are multiple choice questions (with answers) about high school computer science. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml deleted file mode 100644 index f89cca29..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_european_history.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_european_history -description: 'The following are multiple choice questions (with answers) about high school european history. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_european_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml deleted file mode 100644 index f255d37a..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_geography.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_geography -description: 'The following are multiple choice questions (with answers) about high school geography. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_geography diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml deleted file mode 100644 index 108aebf8..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_government_and_politics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_government_and_politics -description: 'The following are multiple choice questions (with answers) about high school government and politics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_government_and_politics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml deleted file mode 100644 index 720baeac..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_macroeconomics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_macroeconomics -description: 'The following are multiple choice questions (with answers) about high school macroeconomics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_macroeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml deleted file mode 100644 index fbad67d6..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_mathematics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_mathematics -description: 'The following are multiple choice questions (with answers) about high school mathematics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml deleted file mode 100644 index 4b4e85bd..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_microeconomics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_microeconomics -description: 'The following are multiple choice questions (with answers) about high school microeconomics. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_microeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml deleted file mode 100644 index 941d6c22..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_physics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_physics -description: 'The following are multiple choice questions (with answers) about high school physics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml deleted file mode 100644 index 831907f0..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_psychology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_psychology -description: 'The following are multiple choice questions (with answers) about high school psychology. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml deleted file mode 100644 index 255c7394..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_statistics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_statistics -description: 'The following are multiple choice questions (with answers) about high school statistics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_statistics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml deleted file mode 100644 index 4ea76cd3..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_us_history.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_us_history -description: 'The following are multiple choice questions (with answers) about high school us history. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_us_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml deleted file mode 100644 index 26551e82..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_high_school_world_history.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_world_history -description: 'The following are multiple choice questions (with answers) about high school world history. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_high_school_world_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml deleted file mode 100644 index 042e81cf..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_aging.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: human_aging -description: 'The following are multiple choice questions (with answers) about human aging. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_human_aging diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml deleted file mode 100644 index d2d55b70..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_human_sexuality.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: human_sexuality -description: 'The following are multiple choice questions (with answers) about human sexuality. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_human_sexuality diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml deleted file mode 100644 index 12b18807..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_international_law.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: international_law -description: 'The following are multiple choice questions (with answers) about international law. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_international_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml deleted file mode 100644 index 51613f16..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_jurisprudence.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: jurisprudence -description: 'The following are multiple choice questions (with answers) about jurisprudence. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_jurisprudence diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml deleted file mode 100644 index aaaef665..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_logical_fallacies.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: logical_fallacies -description: 'The following are multiple choice questions (with answers) about logical fallacies. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_logical_fallacies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml deleted file mode 100644 index 19f6d3ee..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_abstract_algebra.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: abstract_algebra -description: 'The following are multiple choice questions (with answers) about abstract algebra. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_abstract_algebra diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml deleted file mode 100644 index ff927e05..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_anatomy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: anatomy -description: 'The following are multiple choice questions (with answers) about anatomy. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_anatomy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml deleted file mode 100644 index 95329c44..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_astronomy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: astronomy -description: 'The following are multiple choice questions (with answers) about astronomy. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_astronomy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml deleted file mode 100644 index b6917938..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_business_ethics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: business_ethics -description: 'The following are multiple choice questions (with answers) about business ethics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_business_ethics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml deleted file mode 100644 index bc19e7c5..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_clinical_knowledge.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: clinical_knowledge -description: 'The following are multiple choice questions (with answers) about clinical knowledge. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_clinical_knowledge diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml deleted file mode 100644 index defc3d98..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_biology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_biology -description: 'The following are multiple choice questions (with answers) about college biology. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_college_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml deleted file mode 100644 index 15a2b3cc..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_chemistry.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_chemistry -description: 'The following are multiple choice questions (with answers) about college chemistry. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_college_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml deleted file mode 100644 index ff69b70e..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_computer_science.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_computer_science -description: 'The following are multiple choice questions (with answers) about college computer science. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_college_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml deleted file mode 100644 index fb67c2ee..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_mathematics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_mathematics -description: 'The following are multiple choice questions (with answers) about college mathematics. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_college_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml deleted file mode 100644 index 6edac775..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_medicine.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_medicine -description: 'The following are multiple choice questions (with answers) about college medicine. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_college_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml deleted file mode 100644 index 2af2929f..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_college_physics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: college_physics -description: 'The following are multiple choice questions (with answers) about college physics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_college_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml deleted file mode 100644 index fe239463..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_computer_security.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: computer_security -description: 'The following are multiple choice questions (with answers) about computer security. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_computer_security diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml deleted file mode 100644 index a593cb97..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_conceptual_physics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: conceptual_physics -description: 'The following are multiple choice questions (with answers) about conceptual physics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_conceptual_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml deleted file mode 100644 index 034c0e63..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_econometrics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: econometrics -description: 'The following are multiple choice questions (with answers) about econometrics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_econometrics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml deleted file mode 100644 index 06e8e3b0..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_electrical_engineering.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: electrical_engineering -description: 'The following are multiple choice questions (with answers) about electrical engineering. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_electrical_engineering diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml deleted file mode 100644 index ea151100..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_elementary_mathematics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: elementary_mathematics -description: 'The following are multiple choice questions (with answers) about elementary mathematics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_elementary_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml deleted file mode 100644 index 2a2299fb..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_formal_logic.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: formal_logic -description: 'The following are multiple choice questions (with answers) about formal logic. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_formal_logic diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml deleted file mode 100644 index b3c003f3..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_global_facts.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: global_facts -description: 'The following are multiple choice questions (with answers) about global facts. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_global_facts diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml deleted file mode 100644 index 6a28adbd..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_biology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_biology -description: 'The following are multiple choice questions (with answers) about high school biology. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_biology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml deleted file mode 100644 index dedab0ad..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_chemistry.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_chemistry -description: 'The following are multiple choice questions (with answers) about high school chemistry. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_chemistry diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml deleted file mode 100644 index 8c096fd8..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_computer_science.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_computer_science -description: 'The following are multiple choice questions (with answers) about high school computer science. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_computer_science diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml deleted file mode 100644 index 8b2a2705..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_european_history.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_european_history -description: 'The following are multiple choice questions (with answers) about high school european history. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_european_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml deleted file mode 100644 index 32bcc3e1..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_geography.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_geography -description: 'The following are multiple choice questions (with answers) about high school geography. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_geography diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml deleted file mode 100644 index 191bc63b..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_government_and_politics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_government_and_politics -description: 'The following are multiple choice questions (with answers) about high school government and politics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_government_and_politics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml deleted file mode 100644 index 838ffed9..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_macroeconomics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_macroeconomics -description: 'The following are multiple choice questions (with answers) about high school macroeconomics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_macroeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml deleted file mode 100644 index 246d8988..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_mathematics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_mathematics -description: 'The following are multiple choice questions (with answers) about high school mathematics. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_mathematics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml deleted file mode 100644 index 1fea1850..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_microeconomics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_microeconomics -description: 'The following are multiple choice questions (with answers) about high school microeconomics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_microeconomics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml deleted file mode 100644 index 6aa802e8..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_physics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_physics -description: 'The following are multiple choice questions (with answers) about high school physics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_physics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml deleted file mode 100644 index 521b3e54..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_psychology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_psychology -description: 'The following are multiple choice questions (with answers) about high school psychology. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml deleted file mode 100644 index 3cd82472..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_statistics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_statistics -description: 'The following are multiple choice questions (with answers) about high school statistics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_statistics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml deleted file mode 100644 index 34a7d05d..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_us_history.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_us_history -description: 'The following are multiple choice questions (with answers) about high school us history. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_us_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml deleted file mode 100644 index b6390aa3..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_high_school_world_history.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: high_school_world_history -description: 'The following are multiple choice questions (with answers) about high school world history. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_high_school_world_history diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml deleted file mode 100644 index bf454427..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_aging.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: human_aging -description: 'The following are multiple choice questions (with answers) about human aging. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_human_aging diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml deleted file mode 100644 index 7bec1aa9..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_human_sexuality.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: human_sexuality -description: 'The following are multiple choice questions (with answers) about human sexuality. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_human_sexuality diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml deleted file mode 100644 index 6d56237f..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_international_law.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: international_law -description: 'The following are multiple choice questions (with answers) about international law. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_international_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml deleted file mode 100644 index a95f42ed..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_jurisprudence.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: jurisprudence -description: 'The following are multiple choice questions (with answers) about jurisprudence. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_jurisprudence diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml deleted file mode 100644 index bc2b1b41..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_logical_fallacies.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: logical_fallacies -description: 'The following are multiple choice questions (with answers) about logical fallacies. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_logical_fallacies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml deleted file mode 100644 index 3688fd28..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_machine_learning.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: machine_learning -description: 'The following are multiple choice questions (with answers) about machine learning. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_machine_learning diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml deleted file mode 100644 index 70eb8768..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_management.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: management -description: 'The following are multiple choice questions (with answers) about management. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_management diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml deleted file mode 100644 index 48c03524..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_marketing.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: marketing -description: 'The following are multiple choice questions (with answers) about marketing. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_marketing diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml deleted file mode 100644 index 68bd9c12..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_medical_genetics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: medical_genetics -description: 'The following are multiple choice questions (with answers) about medical genetics. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_medical_genetics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml deleted file mode 100644 index bde2352b..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_miscellaneous.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: miscellaneous -description: 'The following are multiple choice questions (with answers) about miscellaneous. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_miscellaneous diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml deleted file mode 100644 index c55d44bd..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_disputes.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: moral_disputes -description: 'The following are multiple choice questions (with answers) about moral disputes. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_moral_disputes diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml deleted file mode 100644 index 99bcb848..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_moral_scenarios.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: moral_scenarios -description: 'The following are multiple choice questions (with answers) about moral scenarios. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_moral_scenarios diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml deleted file mode 100644 index 34c0040a..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_nutrition.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: nutrition -description: 'The following are multiple choice questions (with answers) about nutrition. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_nutrition diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml deleted file mode 100644 index 83588531..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_philosophy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: philosophy -description: 'The following are multiple choice questions (with answers) about philosophy. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_philosophy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml deleted file mode 100644 index a94b514b..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_prehistory.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: prehistory -description: 'The following are multiple choice questions (with answers) about prehistory. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_prehistory diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml deleted file mode 100644 index 2f64beae..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_accounting.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_accounting -description: 'The following are multiple choice questions (with answers) about professional accounting. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_professional_accounting diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml deleted file mode 100644 index de0e8392..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_law.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_law -description: 'The following are multiple choice questions (with answers) about professional law. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_professional_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml deleted file mode 100644 index ef4d0c07..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_medicine.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_medicine -description: 'The following are multiple choice questions (with answers) about professional medicine. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_professional_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml deleted file mode 100644 index 9bb12be0..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_professional_psychology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_psychology -description: 'The following are multiple choice questions (with answers) about professional psychology. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_professional_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml deleted file mode 100644 index e9a761cc..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_public_relations.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: public_relations -description: 'The following are multiple choice questions (with answers) about public relations. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_public_relations diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml deleted file mode 100644 index 6a141ba6..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_security_studies.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: security_studies -description: 'The following are multiple choice questions (with answers) about security studies. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_security_studies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml deleted file mode 100644 index 11069f9e..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_sociology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: sociology -description: 'The following are multiple choice questions (with answers) about sociology. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_sociology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml deleted file mode 100644 index 7aa77456..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_us_foreign_policy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: us_foreign_policy -description: 'The following are multiple choice questions (with answers) about us foreign policy. 
- - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_us_foreign_policy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml deleted file mode 100644 index 9a9b94a1..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_virology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: virology -description: 'The following are multiple choice questions (with answers) about virology. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_virology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml deleted file mode 100644 index 6f26fda1..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_loglikelihood_world_religions.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: world_religions -description: 'The following are multiple choice questions (with answers) about world religions. - - - ' -include: _mmlu_flan_loglikelihood_template_yaml -task: mmlu_flan_n_shot_loglikelihood_world_religions diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml deleted file mode 100644 index c97c9f09..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_machine_learning.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: machine_learning -description: 'The following are multiple choice questions (with answers) about machine learning. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_machine_learning diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml deleted file mode 100644 index 9c0c65b0..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_management.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: management -description: 'The following are multiple choice questions (with answers) about management. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_management diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml deleted file mode 100644 index e2a74ca0..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_marketing.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: marketing -description: 'The following are multiple choice questions (with answers) about marketing. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_marketing diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml deleted file mode 100644 index 0464b15c..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_medical_genetics.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: medical_genetics -description: 'The following are multiple choice questions (with answers) about medical genetics. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_medical_genetics diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml deleted file mode 100644 index 389ca552..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_miscellaneous.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: miscellaneous -description: 'The following are multiple choice questions (with answers) about miscellaneous. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_miscellaneous diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml deleted file mode 100644 index 671ca84e..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_disputes.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: moral_disputes -description: 'The following are multiple choice questions (with answers) about moral disputes. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_moral_disputes diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml deleted file mode 100644 index 1ecbff40..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_moral_scenarios.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: moral_scenarios -description: 'The following are multiple choice questions (with answers) about moral scenarios. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_moral_scenarios diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml deleted file mode 100644 index 6d2da5cb..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_nutrition.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: nutrition -description: 'The following are multiple choice questions (with answers) about nutrition. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_nutrition diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml deleted file mode 100644 index 421c50f9..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_philosophy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: philosophy -description: 'The following are multiple choice questions (with answers) about philosophy. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_philosophy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml deleted file mode 100644 index 6e534911..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_prehistory.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: prehistory -description: 'The following are multiple choice questions (with answers) about prehistory. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_prehistory diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml deleted file mode 100644 index 93b4e4d3..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_accounting.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_accounting -description: 'The following are multiple choice questions (with answers) about professional accounting. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_professional_accounting diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml deleted file mode 100644 index a8704652..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_law.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_law -description: 'The following are multiple choice questions (with answers) about professional law. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_professional_law diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml deleted file mode 100644 index 137a39d5..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_medicine.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_medicine -description: 'The following are multiple choice questions (with answers) about professional medicine. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_professional_medicine diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml deleted file mode 100644 index 342031f7..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_professional_psychology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: professional_psychology -description: 'The following are multiple choice questions (with answers) about professional psychology. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_professional_psychology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml deleted file mode 100644 index 88ffe1b8..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_public_relations.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: public_relations -description: 'The following are multiple choice questions (with answers) about public relations. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_public_relations diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml deleted file mode 100644 index b56c6803..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_security_studies.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: security_studies -description: 'The following are multiple choice questions (with answers) about security studies. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_security_studies diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml deleted file mode 100644 index bca11a0a..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_sociology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: sociology -description: 'The following are multiple choice questions (with answers) about sociology. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_sociology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml deleted file mode 100644 index 797fc9b7..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_us_foreign_policy.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: us_foreign_policy -description: 'The following are multiple choice questions (with answers) about us foreign policy. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_us_foreign_policy diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml deleted file mode 100644 index 6f6d1680..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_virology.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: virology -description: 'The following are multiple choice questions (with answers) about virology. 
- - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_virology diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml deleted file mode 100644 index b0253c46..00000000 --- a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_world_religions.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dataset_name: world_religions -description: 'The following are multiple choice questions (with answers) about world religions. - - - ' -include: _mmlu_flan_generative_template_yaml -task: mmlu_flan_n_shot_world_religions -- GitLab From 4a7528679fcbc71e5ce51c2686908ed728e1a17a Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 11:32:49 +0000 Subject: [PATCH 110/264] update flan mmlu --- lm_eval/tasks/mmlu/_generate_configs.py | 2 +- lm_eval/tasks/mmlu/default/_default_template_yaml | 15 +++++++++++++++ .../tasks/mmlu/default/mmlu_abstract_algebra.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_business_ethics.yaml | 4 ++++ .../mmlu/default/mmlu_clinical_knowledge.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_college_biology.yaml | 4 ++++ .../mmlu/default/mmlu_college_chemistry.yaml | 4 ++++ .../default/mmlu_college_computer_science.yaml | 4 ++++ .../mmlu/default/mmlu_college_mathematics.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_college_medicine.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_college_physics.yaml | 4 ++++ .../mmlu/default/mmlu_computer_security.yaml | 4 ++++ .../mmlu/default/mmlu_conceptual_physics.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml | 4 ++++ .../mmlu/default/mmlu_electrical_engineering.yaml | 4 ++++ .../mmlu/default/mmlu_elementary_mathematics.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_biology.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_chemistry.yaml | 4 ++++ .../mmlu_high_school_computer_science.yaml | 4 ++++ .../mmlu_high_school_european_history.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_geography.yaml | 4 ++++ .../mmlu_high_school_government_and_politics.yaml | 4 ++++ .../default/mmlu_high_school_macroeconomics.yaml | 4 ++++ .../default/mmlu_high_school_mathematics.yaml | 4 ++++ .../default/mmlu_high_school_microeconomics.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_physics.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_psychology.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_statistics.yaml | 4 ++++ .../mmlu/default/mmlu_high_school_us_history.yaml | 4 ++++ .../default/mmlu_high_school_world_history.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_human_sexuality.yaml | 4 ++++ .../mmlu/default/mmlu_international_law.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_jurisprudence.yaml | 4 ++++ .../mmlu/default/mmlu_logical_fallacies.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_machine_learning.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_management.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_marketing.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_medical_genetics.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_miscellaneous.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_moral_disputes.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_moral_scenarios.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_philosophy.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml | 4 
++++ .../default/mmlu_professional_accounting.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_professional_law.yaml | 4 ++++ .../mmlu/default/mmlu_professional_medicine.yaml | 4 ++++ .../default/mmlu_professional_psychology.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_public_relations.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_security_studies.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_sociology.yaml | 4 ++++ .../mmlu/default/mmlu_us_foreign_policy.yaml | 4 ++++ lm_eval/tasks/mmlu/default/mmlu_virology.yaml | 4 ++++ .../tasks/mmlu/default/mmlu_world_religions.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_abstract_algebra.yaml | 4 ++++ .../tasks/mmlu/flan_n_shot/mmlu_gen_anatomy.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_astronomy.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_business_ethics.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_clinical_knowledge.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_college_biology.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_college_chemistry.yaml | 4 ++++ .../mmlu_gen_college_computer_science.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_college_mathematics.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_college_medicine.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_college_physics.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_computer_security.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_conceptual_physics.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_econometrics.yaml | 4 ++++ .../mmlu_gen_electrical_engineering.yaml | 4 ++++ .../mmlu_gen_elementary_mathematics.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_formal_logic.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_global_facts.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_high_school_biology.yaml | 4 ++++ .../mmlu_gen_high_school_chemistry.yaml | 4 ++++ .../mmlu_gen_high_school_computer_science.yaml | 4 ++++ .../mmlu_gen_high_school_european_history.yaml | 4 ++++ .../mmlu_gen_high_school_geography.yaml | 4 ++++ ...u_gen_high_school_government_and_politics.yaml | 4 ++++ .../mmlu_gen_high_school_macroeconomics.yaml | 4 ++++ .../mmlu_gen_high_school_mathematics.yaml | 4 ++++ .../mmlu_gen_high_school_microeconomics.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_high_school_physics.yaml | 4 ++++ .../mmlu_gen_high_school_psychology.yaml | 4 ++++ .../mmlu_gen_high_school_statistics.yaml | 4 ++++ .../mmlu_gen_high_school_us_history.yaml | 4 ++++ .../mmlu_gen_high_school_world_history.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_human_aging.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_human_sexuality.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_international_law.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_jurisprudence.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_logical_fallacies.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_machine_learning.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_management.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_marketing.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_medical_genetics.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_miscellaneous.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_moral_disputes.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_moral_scenarios.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_nutrition.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_philosophy.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_prehistory.yaml | 4 ++++ .../mmlu_gen_professional_accounting.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_professional_law.yaml | 4 ++++ .../mmlu_gen_professional_medicine.yaml | 4 ++++ .../mmlu_gen_professional_psychology.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_public_relations.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_security_studies.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_gen_sociology.yaml | 4 ++++ 
.../flan_n_shot/mmlu_gen_us_foreign_policy.yaml | 4 ++++ .../tasks/mmlu/flan_n_shot/mmlu_gen_virology.yaml | 4 ++++ .../flan_n_shot/mmlu_gen_world_religions.yaml | 4 ++++ .../flan_n_shot/mmlu_log_abstract_algebra.yaml | 4 ++++ .../tasks/mmlu/flan_n_shot/mmlu_log_anatomy.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_astronomy.yaml | 4 ++++ .../flan_n_shot/mmlu_log_business_ethics.yaml | 4 ++++ .../flan_n_shot/mmlu_log_clinical_knowledge.yaml | 4 ++++ .../flan_n_shot/mmlu_log_college_biology.yaml | 4 ++++ .../flan_n_shot/mmlu_log_college_chemistry.yaml | 4 ++++ .../mmlu_log_college_computer_science.yaml | 4 ++++ .../flan_n_shot/mmlu_log_college_mathematics.yaml | 4 ++++ .../flan_n_shot/mmlu_log_college_medicine.yaml | 4 ++++ .../flan_n_shot/mmlu_log_college_physics.yaml | 4 ++++ .../flan_n_shot/mmlu_log_computer_security.yaml | 4 ++++ .../flan_n_shot/mmlu_log_conceptual_physics.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_econometrics.yaml | 4 ++++ .../mmlu_log_electrical_engineering.yaml | 4 ++++ .../mmlu_log_elementary_mathematics.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_formal_logic.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_global_facts.yaml | 4 ++++ .../flan_n_shot/mmlu_log_high_school_biology.yaml | 4 ++++ .../mmlu_log_high_school_chemistry.yaml | 4 ++++ .../mmlu_log_high_school_computer_science.yaml | 4 ++++ .../mmlu_log_high_school_european_history.yaml | 4 ++++ .../mmlu_log_high_school_geography.yaml | 4 ++++ ...u_log_high_school_government_and_politics.yaml | 4 ++++ .../mmlu_log_high_school_macroeconomics.yaml | 4 ++++ .../mmlu_log_high_school_mathematics.yaml | 4 ++++ .../mmlu_log_high_school_microeconomics.yaml | 4 ++++ .../flan_n_shot/mmlu_log_high_school_physics.yaml | 4 ++++ .../mmlu_log_high_school_psychology.yaml | 4 ++++ .../mmlu_log_high_school_statistics.yaml | 4 ++++ .../mmlu_log_high_school_us_history.yaml | 4 ++++ .../mmlu_log_high_school_world_history.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_human_aging.yaml | 4 ++++ .../flan_n_shot/mmlu_log_human_sexuality.yaml | 4 ++++ .../flan_n_shot/mmlu_log_international_law.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_jurisprudence.yaml | 4 ++++ .../flan_n_shot/mmlu_log_logical_fallacies.yaml | 4 ++++ .../flan_n_shot/mmlu_log_machine_learning.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_management.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_marketing.yaml | 4 ++++ .../flan_n_shot/mmlu_log_medical_genetics.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_miscellaneous.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_moral_disputes.yaml | 4 ++++ .../flan_n_shot/mmlu_log_moral_scenarios.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_nutrition.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_philosophy.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_prehistory.yaml | 4 ++++ .../mmlu_log_professional_accounting.yaml | 4 ++++ .../flan_n_shot/mmlu_log_professional_law.yaml | 4 ++++ .../mmlu_log_professional_medicine.yaml | 4 ++++ .../mmlu_log_professional_psychology.yaml | 4 ++++ .../flan_n_shot/mmlu_log_public_relations.yaml | 4 ++++ .../flan_n_shot/mmlu_log_security_studies.yaml | 4 ++++ .../mmlu/flan_n_shot/mmlu_log_sociology.yaml | 4 ++++ .../flan_n_shot/mmlu_log_us_foreign_policy.yaml | 4 ++++ .../tasks/mmlu/flan_n_shot/mmlu_log_virology.yaml | 4 ++++ .../flan_n_shot/mmlu_log_world_religions.yaml | 4 ++++ 173 files changed, 700 insertions(+), 1 deletion(-) create mode 100644 lm_eval/tasks/mmlu/default/_default_template_yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml create mode 100644 
lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_european_history.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_international_law.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_management.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_marketing.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_moral_scenarios.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_philosophy.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml create mode 100644 
lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_sociology.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_virology.yaml create mode 100644 lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_abstract_algebra.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_computer_security.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_european_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_geography.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_government_and_politics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_macroeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_microeconomics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_statistics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_us_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_world_history.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_aging.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_sexuality.yaml create mode 100644 
lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_international_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_jurisprudence.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_logical_fallacies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_machine_learning.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_management.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_marketing.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_medical_genetics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_miscellaneous.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_disputes.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_scenarios.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_nutrition.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_philosophy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_prehistory.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_accounting.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_law.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_psychology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_public_relations.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_security_studies.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_sociology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_us_foreign_policy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_virology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_world_religions.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_abstract_algebra.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_anatomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_astronomy.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_business_ethics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_computer_science.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_medicine.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_computer_security.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_conceptual_physics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_econometrics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_electrical_engineering.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_formal_logic.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_global_facts.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_biology.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_computer_science.yaml create mode 100644 
lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_european_history.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_geography.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_government_and_politics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_macroeconomics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_mathematics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_microeconomics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_physics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_psychology.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_statistics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_us_history.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_world_history.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_aging.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_sexuality.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_international_law.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_jurisprudence.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_logical_fallacies.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_machine_learning.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_management.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_marketing.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_medical_genetics.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_miscellaneous.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_disputes.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_scenarios.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_nutrition.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_philosophy.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_prehistory.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_accounting.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_law.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_medicine.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_psychology.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_public_relations.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_security_studies.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_sociology.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_us_foreign_policy.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_virology.yaml
create mode 100644 lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_world_religions.yaml
diff --git a/lm_eval/tasks/mmlu/_generate_configs.py b/lm_eval/tasks/mmlu/_generate_configs.py
index 36a3936c..f0dbd6bd 100644
--- a/lm_eval/tasks/mmlu/_generate_configs.py
+++ b/lm_eval/tasks/mmlu/_generate_configs.py
@@ -106,7 +106,7 @@ if __name__ == "__main__":

         yaml_dict = {
             "include": base_yaml_name,
-            "task": f"mmlu_{args.task_prefix}_{subject}",
+            "task": f"mmlu_{args.task_prefix}_{subject}" if args.task_prefix != "" else f"mmlu_{subject}",
             "dataset_name": subject,
             "description": description,
         }
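[Note: the _generate_configs.py hunk above is the functional core of this commit. With an empty --task_prefix, generated tasks are now named mmlu_<subject> rather than mmlu__<subject> with a dangling separator. A minimal sketch of the naming rule follows; the helper function is hypothetical, written only to illustrate the patched expression.]

# Hypothetical helper mirroring the patched naming expression in
# lm_eval/tasks/mmlu/_generate_configs.py; not part of the patch itself.
def task_name(subject: str, task_prefix: str = "") -> str:
    # An empty prefix must not leave a double underscore in the name.
    if task_prefix != "":
        return f"mmlu_{task_prefix}_{subject}"
    return f"mmlu_{subject}"

assert task_name("anatomy") == "mmlu_anatomy"
assert task_name("anatomy", "flan_n_shot_log") == "mmlu_flan_n_shot_log_anatomy"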
diff --git a/lm_eval/tasks/mmlu/default/_default_template_yaml b/lm_eval/tasks/mmlu/default/_default_template_yaml
new file mode 100644
index 00000000..757f7b5e
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/_default_template_yaml
@@ -0,0 +1,15 @@
+group: mmlu
+dataset_path: cais/mmlu
+test_split: test
+fewshot_split: dev
+output_type: multiple_choice
+doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
+doc_to_choice: ["A", "B", "C", "D"]
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
\ No newline at end of file
diff --git a/lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml b/lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml
new file mode 100644
index 00000000..b6d595d3
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "abstract_algebra"
+"description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_abstract_algebra"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml b/lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml
new file mode 100644
index 00000000..6459cb41
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "anatomy"
+"description": "The following are multiple choice questions (with answers) about anatomy.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_anatomy"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml b/lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml
new file mode 100644
index 00000000..573dedd7
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "astronomy"
+"description": "The following are multiple choice questions (with answers) about astronomy.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_astronomy"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml b/lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml
new file mode 100644
index 00000000..4b20b795
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "business_ethics"
+"description": "The following are multiple choice questions (with answers) about business ethics.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_business_ethics"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml
new file mode 100644
index 00000000..f758e66d
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "clinical_knowledge"
+"description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_clinical_knowledge"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml b/lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml
new file mode 100644
index 00000000..f8069007
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_biology"
+"description": "The following are multiple choice questions (with answers) about college biology.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_college_biology"
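[Note: _default_template_yaml centralizes everything the subject files share (dataset path, splits, output type, prompt format, metrics), so each per-subject file only sets dataset_name, description, and task, plus an include pointing back at the template. As an illustration of what the doc_to_text Jinja template produces, here is a small, self-contained sketch; the sample document is invented for demonstration.]

# Illustrative only: render the template's doc_to_text for one invented
# MMLU-style document to show the prompt the model actually sees.
from jinja2 import Template

DOC_TO_TEXT = (
    "{{question.strip()}}\n"
    "A. {{choices[0]}}\n"
    "B. {{choices[1]}}\n"
    "C. {{choices[2]}}\n"
    "D. {{choices[3]}}\n"
    "Answer:"
)

doc = {
    "question": "What is the capital of France?  ",
    "choices": ["Lyon", "Paris", "Marseille", "Nice"],
    "answer": 1,  # index into doc_to_choice, i.e. the letter "B"
}

print(Template(DOC_TO_TEXT).render(**doc))

[Because doc_to_choice is ["A", "B", "C", "D"] and doc_to_target is the integer answer field, scoring compares the model's output against the answer letters rather than the full answer strings.]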
diff --git a/lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml b/lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml
new file mode 100644
index 00000000..e03fbccd
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_chemistry"
+"description": "The following are multiple choice questions (with answers) about college chemistry.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_college_chemistry"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml b/lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml
new file mode 100644
index 00000000..a9d4a6f2
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_computer_science"
+"description": "The following are multiple choice questions (with answers) about college computer science.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_college_computer_science"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml b/lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml
new file mode 100644
index 00000000..f6a86179
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_mathematics"
+"description": "The following are multiple choice questions (with answers) about college mathematics.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_college_mathematics"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_college_medicine.yaml b/lm_eval/tasks/mmlu/default/mmlu_college_medicine.yaml
new file mode 100644
index 00000000..0ea75fb3
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_college_medicine.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_medicine"
+"description": "The following are multiple choice questions (with answers) about college medicine.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_college_medicine"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_college_physics.yaml b/lm_eval/tasks/mmlu/default/mmlu_college_physics.yaml
new file mode 100644
index 00000000..82f13e40
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_college_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_physics"
+"description": "The following are multiple choice questions (with answers) about college physics.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_college_physics"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml b/lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml
new file mode 100644
index 00000000..e9e06de2
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "computer_security"
+"description": "The following are multiple choice questions (with answers) about computer security.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_computer_security"
diff --git a/lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml b/lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml
new file mode 100644
index 00000000..30ca6efe
--- /dev/null
+++ b/lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "conceptual_physics"
+"description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n"
+"include": "_default_template_yaml"
+"task": "mmlu_conceptual_physics"
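[Note: the subject files above are deliberately uniform, which is what makes the include mechanism pay off. A hedged sketch of how such a config could resolve at load time (per-subject keys overriding the shared template) is below; the actual harness loader may differ in its details.]

# Hedged sketch: merge a per-subject config over the shared template the
# way an `include` directive could plausibly be resolved. Paths assume
# the working directory is the repository root.
import yaml

with open("lm_eval/tasks/mmlu/default/_default_template_yaml") as f:
    template = yaml.safe_load(f)
with open("lm_eval/tasks/mmlu/default/mmlu_college_physics.yaml") as f:
    subject = yaml.safe_load(f)

config = dict(template)
config.update({k: v for k, v in subject.items() if k != "include"})

print(config["task"])         # mmlu_college_physics
print(config["output_type"])  # multiple_choice, inherited from the template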
econometrics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_econometrics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml b/lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml new file mode 100644 index 00000000..8dd63b33 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml @@ -0,0 +1,4 @@ +"dataset_name": "electrical_engineering" +"description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_electrical_engineering" diff --git a/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml new file mode 100644 index 00000000..4979ee30 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_mathematics" +"description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_elementary_mathematics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml b/lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml new file mode 100644 index 00000000..9b73509b --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml @@ -0,0 +1,4 @@ +"dataset_name": "formal_logic" +"description": "The following are multiple choice questions (with answers) about formal logic.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_formal_logic" diff --git a/lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml b/lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml new file mode 100644 index 00000000..8c43a6c9 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "global_facts" +"description": "The following are multiple choice questions (with answers) about global facts.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_global_facts" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml new file mode 100644 index 00000000..453d3033 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_biology" +"description": "The following are multiple choice questions (with answers) about high school biology.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_biology" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml new file mode 100644 index 00000000..714ee0e5 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_chemistry" +"description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_chemistry" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml new file mode 100644 index 00000000..9326e259 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_computer_science" +"description": "The following are multiple choice questions (with answers) about high school computer science.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_computer_science" diff --git 
a/lm_eval/tasks/mmlu/default/mmlu_high_school_european_history.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_european_history.yaml new file mode 100644 index 00000000..e212cd22 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_european_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_european_history" +"description": "The following are multiple choice questions (with answers) about high school european history.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_european_history" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml new file mode 100644 index 00000000..a7fffc25 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" +"description": "The following are multiple choice questions (with answers) about high school geography.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_geography" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml new file mode 100644 index 00000000..7255d60f --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_government_and_politics" +"description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_government_and_politics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml new file mode 100644 index 00000000..29d9ddd7 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_macroeconomics" +"description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_macroeconomics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml new file mode 100644 index 00000000..035e7a12 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_mathematics" +"description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_mathematics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml new file mode 100644 index 00000000..72b1c8cf --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_microeconomics" +"description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_microeconomics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml new file mode 100644 index 00000000..ef8f6ca5 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_physics" +"description": "The following are multiple choice questions 
(with answers) about high school physics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_physics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml new file mode 100644 index 00000000..5c4cce75 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_psychology" +"description": "The following are multiple choice questions (with answers) about high school psychology.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_psychology" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml new file mode 100644 index 00000000..20ed42ec --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_statistics" +"description": "The following are multiple choice questions (with answers) about high school statistics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_statistics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml new file mode 100644 index 00000000..18cd48da --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_us_history" +"description": "The following are multiple choice questions (with answers) about high school us history.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_us_history" diff --git a/lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml b/lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml new file mode 100644 index 00000000..b17daac6 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_world_history" +"description": "The following are multiple choice questions (with answers) about high school world history.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_high_school_world_history" diff --git a/lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml b/lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml new file mode 100644 index 00000000..080b2676 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_aging" +"description": "The following are multiple choice questions (with answers) about human aging.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_human_aging" diff --git a/lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml b/lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml new file mode 100644 index 00000000..ca3389fe --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_sexuality" +"description": "The following are multiple choice questions (with answers) about human sexuality.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_human_sexuality" diff --git a/lm_eval/tasks/mmlu/default/mmlu_international_law.yaml b/lm_eval/tasks/mmlu/default/mmlu_international_law.yaml new file mode 100644 index 00000000..a3d443e0 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_international_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "international_law" +"description": "The following are multiple choice questions (with answers) about international law.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_international_law" diff --git 
a/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml b/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml new file mode 100644 index 00000000..4ba00a2a --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml @@ -0,0 +1,4 @@ +"dataset_name": "jurisprudence" +"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_jurisprudence" diff --git a/lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml b/lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml new file mode 100644 index 00000000..ea45a4f3 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "logical_fallacies" +"description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_logical_fallacies" diff --git a/lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml b/lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml new file mode 100644 index 00000000..2ba6d162 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml @@ -0,0 +1,4 @@ +"dataset_name": "machine_learning" +"description": "The following are multiple choice questions (with answers) about machine learning.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_machine_learning" diff --git a/lm_eval/tasks/mmlu/default/mmlu_management.yaml b/lm_eval/tasks/mmlu/default/mmlu_management.yaml new file mode 100644 index 00000000..b4ea6da9 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_management.yaml @@ -0,0 +1,4 @@ +"dataset_name": "management" +"description": "The following are multiple choice questions (with answers) about management.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_management" diff --git a/lm_eval/tasks/mmlu/default/mmlu_marketing.yaml b/lm_eval/tasks/mmlu/default/mmlu_marketing.yaml new file mode 100644 index 00000000..afa30a0c --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_marketing.yaml @@ -0,0 +1,4 @@ +"dataset_name": "marketing" +"description": "The following are multiple choice questions (with answers) about marketing.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_marketing" diff --git a/lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml b/lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml new file mode 100644 index 00000000..92095635 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "medical_genetics" +"description": "The following are multiple choice questions (with answers) about medical genetics.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_medical_genetics" diff --git a/lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml b/lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml new file mode 100644 index 00000000..94ebd1b0 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml @@ -0,0 +1,4 @@ +"dataset_name": "miscellaneous" +"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_miscellaneous" diff --git a/lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml b/lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml new file mode 100644 index 00000000..8bea0a1f --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_disputes" +"description": "The following are multiple choice questions (with answers) about moral 
disputes.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_moral_disputes" diff --git a/lm_eval/tasks/mmlu/default/mmlu_moral_scenarios.yaml b/lm_eval/tasks/mmlu/default/mmlu_moral_scenarios.yaml new file mode 100644 index 00000000..71dcc693 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_moral_scenarios.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_scenarios" +"description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_moral_scenarios" diff --git a/lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml b/lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml new file mode 100644 index 00000000..e6b4cbcd --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml @@ -0,0 +1,4 @@ +"dataset_name": "nutrition" +"description": "The following are multiple choice questions (with answers) about nutrition.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_nutrition" diff --git a/lm_eval/tasks/mmlu/default/mmlu_philosophy.yaml b/lm_eval/tasks/mmlu/default/mmlu_philosophy.yaml new file mode 100644 index 00000000..b9a0b2c5 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_philosophy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "philosophy" +"description": "The following are multiple choice questions (with answers) about philosophy.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_philosophy" diff --git a/lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml b/lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml new file mode 100644 index 00000000..7f71bd54 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml @@ -0,0 +1,4 @@ +"dataset_name": "prehistory" +"description": "The following are multiple choice questions (with answers) about prehistory.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_prehistory" diff --git a/lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml b/lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml new file mode 100644 index 00000000..94ca6e6e --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_accounting" +"description": "The following are multiple choice questions (with answers) about professional accounting.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_professional_accounting" diff --git a/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml b/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml new file mode 100644 index 00000000..074c34e6 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_law" +"description": "The following are multiple choice questions (with answers) about professional law.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_professional_law" diff --git a/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml b/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml new file mode 100644 index 00000000..2f99c316 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_medicine" +"description": "The following are multiple choice questions (with answers) about professional medicine.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_professional_medicine" diff --git a/lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml b/lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml new file mode 100644 index 00000000..01565848 --- /dev/null +++ 
b/lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_psychology" +"description": "The following are multiple choice questions (with answers) about professional psychology.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_professional_psychology" diff --git a/lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml b/lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml new file mode 100644 index 00000000..0d46c66e --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml @@ -0,0 +1,4 @@ +"dataset_name": "public_relations" +"description": "The following are multiple choice questions (with answers) about public relations.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_public_relations" diff --git a/lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml b/lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml new file mode 100644 index 00000000..f30dffde --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "security_studies" +"description": "The following are multiple choice questions (with answers) about security studies.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_security_studies" diff --git a/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml b/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml new file mode 100644 index 00000000..c36bd403 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sociology" +"description": "The following are multiple choice questions (with answers) about sociology.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_sociology" diff --git a/lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml new file mode 100644 index 00000000..fe8c68d8 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "us_foreign_policy" +"description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_us_foreign_policy" diff --git a/lm_eval/tasks/mmlu/default/mmlu_virology.yaml b/lm_eval/tasks/mmlu/default/mmlu_virology.yaml new file mode 100644 index 00000000..4cbd0959 --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_virology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "virology" +"description": "The following are multiple choice questions (with answers) about virology.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_virology" diff --git a/lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml b/lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml new file mode 100644 index 00000000..375efbae --- /dev/null +++ b/lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_religions" +"description": "The following are multiple choice questions (with answers) about world religions.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_world_religions" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_abstract_algebra.yaml new file mode 100644 index 00000000..49b9c425 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_abstract_algebra.yaml @@ -0,0 +1,4 @@ +"dataset_name": "abstract_algebra" +"description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": 
"mmlu_flan_n_shot_generative_abstract_algebra" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_anatomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_anatomy.yaml new file mode 100644 index 00000000..0c8d7914 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_anatomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "anatomy" +"description": "The following are multiple choice questions (with answers) about anatomy.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_anatomy" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_astronomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_astronomy.yaml new file mode 100644 index 00000000..c92a1027 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_astronomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "astronomy" +"description": "The following are multiple choice questions (with answers) about astronomy.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_astronomy" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_business_ethics.yaml new file mode 100644 index 00000000..4b65902e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_business_ethics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "business_ethics" +"description": "The following are multiple choice questions (with answers) about business ethics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_business_ethics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_clinical_knowledge.yaml new file mode 100644 index 00000000..295fb234 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_clinical_knowledge.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_knowledge" +"description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_clinical_knowledge" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_biology.yaml new file mode 100644 index 00000000..f945181b --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_biology" +"description": "The following are multiple choice questions (with answers) about college biology.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_college_biology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_chemistry.yaml new file mode 100644 index 00000000..1fdab27d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_chemistry" +"description": "The following are multiple choice questions (with answers) about college chemistry.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_college_chemistry" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_computer_science.yaml new file mode 100644 index 00000000..6b41a5bb --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_computer_science" +"description": "The following are multiple choice questions (with 
answers) about college computer science.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_college_computer_science" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_mathematics.yaml new file mode 100644 index 00000000..29e80a5e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_mathematics" +"description": "The following are multiple choice questions (with answers) about college mathematics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_college_mathematics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_medicine.yaml new file mode 100644 index 00000000..a5061541 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_medicine" +"description": "The following are multiple choice questions (with answers) about college medicine.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_college_medicine" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_physics.yaml new file mode 100644 index 00000000..ec3262ee --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_college_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_physics" +"description": "The following are multiple choice questions (with answers) about college physics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_college_physics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_computer_security.yaml new file mode 100644 index 00000000..a9ade9c5 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_computer_security.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_security" +"description": "The following are multiple choice questions (with answers) about computer security.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_computer_security" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_conceptual_physics.yaml new file mode 100644 index 00000000..5a903a65 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_conceptual_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "conceptual_physics" +"description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_conceptual_physics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_econometrics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_econometrics.yaml new file mode 100644 index 00000000..847c8ce6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_econometrics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "econometrics" +"description": "The following are multiple choice questions (with answers) about econometrics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_econometrics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_electrical_engineering.yaml new file mode 100644 index 00000000..038379e0 --- 
/dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_electrical_engineering.yaml @@ -0,0 +1,4 @@ +"dataset_name": "electrical_engineering" +"description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_electrical_engineering" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_elementary_mathematics.yaml new file mode 100644 index 00000000..4fd779de --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_elementary_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_mathematics" +"description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_elementary_mathematics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_formal_logic.yaml new file mode 100644 index 00000000..bb528831 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_formal_logic.yaml @@ -0,0 +1,4 @@ +"dataset_name": "formal_logic" +"description": "The following are multiple choice questions (with answers) about formal logic.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_formal_logic" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_global_facts.yaml new file mode 100644 index 00000000..1145dcab --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_global_facts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "global_facts" +"description": "The following are multiple choice questions (with answers) about global facts.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_global_facts" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_biology.yaml new file mode 100644 index 00000000..574a0c58 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_biology" +"description": "The following are multiple choice questions (with answers) about high school biology.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_biology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_chemistry.yaml new file mode 100644 index 00000000..ef79ed73 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_chemistry" +"description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_chemistry" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_computer_science.yaml new file mode 100644 index 00000000..9d9200a6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_computer_science" +"description": "The following are multiple choice questions (with answers) about high school computer science.\n\n" +"include": 
"_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_computer_science" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_european_history.yaml new file mode 100644 index 00000000..e4b52a9c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_european_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_european_history" +"description": "The following are multiple choice questions (with answers) about high school european history.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_european_history" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_geography.yaml new file mode 100644 index 00000000..8403d20e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" +"description": "The following are multiple choice questions (with answers) about high school geography.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_geography" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_government_and_politics.yaml new file mode 100644 index 00000000..50ad3863 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_government_and_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_government_and_politics" +"description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_government_and_politics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_macroeconomics.yaml new file mode 100644 index 00000000..18bfb8b1 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_macroeconomics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_macroeconomics" +"description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_macroeconomics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_mathematics.yaml new file mode 100644 index 00000000..1b04a06f --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_mathematics" +"description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_mathematics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_microeconomics.yaml new file mode 100644 index 00000000..9588af59 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_microeconomics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_microeconomics" +"description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n" 
+"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_microeconomics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_physics.yaml new file mode 100644 index 00000000..4aa033c8 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_physics" +"description": "The following are multiple choice questions (with answers) about high school physics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_physics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_psychology.yaml new file mode 100644 index 00000000..168c0c15 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_psychology" +"description": "The following are multiple choice questions (with answers) about high school psychology.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_psychology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_statistics.yaml new file mode 100644 index 00000000..ba195da9 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_statistics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_statistics" +"description": "The following are multiple choice questions (with answers) about high school statistics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_statistics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_us_history.yaml new file mode 100644 index 00000000..0605fbc4 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_us_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_us_history" +"description": "The following are multiple choice questions (with answers) about high school us history.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_us_history" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_world_history.yaml new file mode 100644 index 00000000..aa54d758 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_high_school_world_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_world_history" +"description": "The following are multiple choice questions (with answers) about high school world history.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_high_school_world_history" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_aging.yaml new file mode 100644 index 00000000..d47b7fef --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_aging.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_aging" +"description": "The following are multiple choice questions (with answers) about human aging.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_human_aging" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_sexuality.yaml 
b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_sexuality.yaml new file mode 100644 index 00000000..9be15e54 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_human_sexuality.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_sexuality" +"description": "The following are multiple choice questions (with answers) about human sexuality.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_human_sexuality" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_international_law.yaml new file mode 100644 index 00000000..b80c9d58 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_international_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "international_law" +"description": "The following are multiple choice questions (with answers) about international law.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_international_law" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_jurisprudence.yaml new file mode 100644 index 00000000..5e7a5395 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_jurisprudence.yaml @@ -0,0 +1,4 @@ +"dataset_name": "jurisprudence" +"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_jurisprudence" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_logical_fallacies.yaml new file mode 100644 index 00000000..fcb718a0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_logical_fallacies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "logical_fallacies" +"description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_logical_fallacies" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_machine_learning.yaml new file mode 100644 index 00000000..d879b54c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_machine_learning.yaml @@ -0,0 +1,4 @@ +"dataset_name": "machine_learning" +"description": "The following are multiple choice questions (with answers) about machine learning.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_machine_learning" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_management.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_management.yaml new file mode 100644 index 00000000..887c71a3 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_management.yaml @@ -0,0 +1,4 @@ +"dataset_name": "management" +"description": "The following are multiple choice questions (with answers) about management.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_management" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_marketing.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_marketing.yaml new file mode 100644 index 00000000..bad500ca --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_marketing.yaml @@ -0,0 +1,4 @@ +"dataset_name": "marketing" +"description": "The following are multiple choice questions (with answers) about marketing.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_marketing" diff 
--git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_medical_genetics.yaml new file mode 100644 index 00000000..c4faff12 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_medical_genetics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "medical_genetics" +"description": "The following are multiple choice questions (with answers) about medical genetics.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_medical_genetics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_miscellaneous.yaml new file mode 100644 index 00000000..e9aac340 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_miscellaneous.yaml @@ -0,0 +1,4 @@ +"dataset_name": "miscellaneous" +"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_miscellaneous" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_disputes.yaml new file mode 100644 index 00000000..41af33e0 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_disputes.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_disputes" +"description": "The following are multiple choice questions (with answers) about moral disputes.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_moral_disputes" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_scenarios.yaml new file mode 100644 index 00000000..1689c3d9 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_moral_scenarios.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_scenarios" +"description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_moral_scenarios" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_nutrition.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_nutrition.yaml new file mode 100644 index 00000000..24be1a63 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_nutrition.yaml @@ -0,0 +1,4 @@ +"dataset_name": "nutrition" +"description": "The following are multiple choice questions (with answers) about nutrition.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_nutrition" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_philosophy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_philosophy.yaml new file mode 100644 index 00000000..01040729 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_philosophy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "philosophy" +"description": "The following are multiple choice questions (with answers) about philosophy.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_philosophy" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_prehistory.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_prehistory.yaml new file mode 100644 index 00000000..fc5a6fbe --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_prehistory.yaml @@ -0,0 +1,4 @@ +"dataset_name": "prehistory" +"description": "The following are multiple choice questions (with answers) about prehistory.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_prehistory" 
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_accounting.yaml new file mode 100644 index 00000000..cbdd2f0d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_accounting.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_accounting" +"description": "The following are multiple choice questions (with answers) about professional accounting.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_professional_accounting" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_law.yaml new file mode 100644 index 00000000..42e46529 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_law" +"description": "The following are multiple choice questions (with answers) about professional law.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_professional_law" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_medicine.yaml new file mode 100644 index 00000000..a64610e6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_medicine" +"description": "The following are multiple choice questions (with answers) about professional medicine.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_professional_medicine" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_psychology.yaml new file mode 100644 index 00000000..b0c574fe --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_professional_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_psychology" +"description": "The following are multiple choice questions (with answers) about professional psychology.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_professional_psychology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_public_relations.yaml new file mode 100644 index 00000000..ff1030fc --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_public_relations.yaml @@ -0,0 +1,4 @@ +"dataset_name": "public_relations" +"description": "The following are multiple choice questions (with answers) about public relations.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_public_relations" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_security_studies.yaml new file mode 100644 index 00000000..25555da4 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_security_studies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "security_studies" +"description": "The following are multiple choice questions (with answers) about security studies.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_security_studies" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_sociology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_sociology.yaml new file mode 100644 index 00000000..f8ac254c --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_sociology.yaml @@ -0,0 +1,4 
@@ +"dataset_name": "sociology" +"description": "The following are multiple choice questions (with answers) about sociology.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_sociology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_us_foreign_policy.yaml new file mode 100644 index 00000000..af3917ac --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_us_foreign_policy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "us_foreign_policy" +"description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_us_foreign_policy" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_virology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_virology.yaml new file mode 100644 index 00000000..b8df2d59 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_virology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "virology" +"description": "The following are multiple choice questions (with answers) about virology.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_virology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_world_religions.yaml new file mode 100644 index 00000000..496f66c5 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_gen_world_religions.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_religions" +"description": "The following are multiple choice questions (with answers) about world religions.\n\n" +"include": "_mmlu_flan_generative_template_yaml" +"task": "mmlu_flan_n_shot_generative_world_religions" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_abstract_algebra.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_abstract_algebra.yaml new file mode 100644 index 00000000..4ea918d6 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_abstract_algebra.yaml @@ -0,0 +1,4 @@ +"dataset_name": "abstract_algebra" +"description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_abstract_algebra" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_anatomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_anatomy.yaml new file mode 100644 index 00000000..9205bd31 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_anatomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "anatomy" +"description": "The following are multiple choice questions (with answers) about anatomy.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_anatomy" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_astronomy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_astronomy.yaml new file mode 100644 index 00000000..dcd41de7 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_astronomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "astronomy" +"description": "The following are multiple choice questions (with answers) about astronomy.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_astronomy" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_business_ethics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_business_ethics.yaml new file mode 100644 index 00000000..2b57abf3 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_business_ethics.yaml @@ -0,0 
+1,4 @@ +"dataset_name": "business_ethics" +"description": "The following are multiple choice questions (with answers) about business ethics.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_business_ethics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_clinical_knowledge.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_clinical_knowledge.yaml new file mode 100644 index 00000000..5b5da42e --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_clinical_knowledge.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_knowledge" +"description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_clinical_knowledge" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_biology.yaml new file mode 100644 index 00000000..c8cc429d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_biology" +"description": "The following are multiple choice questions (with answers) about college biology.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_college_biology" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_chemistry.yaml new file mode 100644 index 00000000..8be3a04d --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_chemistry" +"description": "The following are multiple choice questions (with answers) about college chemistry.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_college_chemistry" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_computer_science.yaml new file mode 100644 index 00000000..506ee760 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_computer_science" +"description": "The following are multiple choice questions (with answers) about college computer science.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_college_computer_science" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_mathematics.yaml new file mode 100644 index 00000000..a9fe1814 --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_mathematics" +"description": "The following are multiple choice questions (with answers) about college mathematics.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_college_mathematics" diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_medicine.yaml new file mode 100644 index 00000000..6f5d767a --- /dev/null +++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_medicine" +"description": "The following are multiple choice questions (with answers) about college medicine.\n\n" +"include": "_mmlu_flan_loglikelihood_template_yaml" +"task": "mmlu_flan_n_shot_loglikelihood_college_medicine" 
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_physics.yaml
new file mode 100644
index 00000000..c6c22a40
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_college_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_physics"
+"description": "The following are multiple choice questions (with answers) about college physics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_physics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_computer_security.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_computer_security.yaml
new file mode 100644
index 00000000..96bccc15
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_computer_security.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "computer_security"
+"description": "The following are multiple choice questions (with answers) about computer security.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_computer_security"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_conceptual_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_conceptual_physics.yaml
new file mode 100644
index 00000000..2fc15ed0
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_conceptual_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "conceptual_physics"
+"description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_conceptual_physics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_econometrics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_econometrics.yaml
new file mode 100644
index 00000000..07dbf921
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_econometrics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "econometrics"
+"description": "The following are multiple choice questions (with answers) about econometrics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_econometrics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_electrical_engineering.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_electrical_engineering.yaml
new file mode 100644
index 00000000..94492b11
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_electrical_engineering.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "electrical_engineering"
+"description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_electrical_engineering"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_elementary_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_elementary_mathematics.yaml
new file mode 100644
index 00000000..2cc56ef8
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_elementary_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "elementary_mathematics"
+"description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_elementary_mathematics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_formal_logic.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_formal_logic.yaml
new file mode 100644
index 00000000..17e28205
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_formal_logic.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "formal_logic"
+"description": "The following are multiple choice questions (with answers) about formal logic.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_formal_logic"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_global_facts.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_global_facts.yaml
new file mode 100644
index 00000000..2b3cb863
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_global_facts.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "global_facts"
+"description": "The following are multiple choice questions (with answers) about global facts.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_global_facts"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_biology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_biology.yaml
new file mode 100644
index 00000000..ed3e70b2
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_biology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_biology"
+"description": "The following are multiple choice questions (with answers) about high school biology.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_biology"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_chemistry.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_chemistry.yaml
new file mode 100644
index 00000000..729d37fa
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_chemistry.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_chemistry"
+"description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_chemistry"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_computer_science.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_computer_science.yaml
new file mode 100644
index 00000000..7003e94c
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_computer_science.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_computer_science"
+"description": "The following are multiple choice questions (with answers) about high school computer science.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_computer_science"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_european_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_european_history.yaml
new file mode 100644
index 00000000..0ad96085
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_european_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_european_history"
+"description": "The following are multiple choice questions (with answers) about high school european history.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_european_history"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_geography.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_geography.yaml
new file mode 100644
index 00000000..f26e8bc6
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_geography.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_geography"
+"description": "The following are multiple choice questions (with answers) about high school geography.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_geography"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_government_and_politics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_government_and_politics.yaml
new file mode 100644
index 00000000..523e278d
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_government_and_politics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_government_and_politics"
+"description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_government_and_politics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_macroeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_macroeconomics.yaml
new file mode 100644
index 00000000..6b08a4fc
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_macroeconomics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_macroeconomics"
+"description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_macroeconomics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_mathematics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_mathematics.yaml
new file mode 100644
index 00000000..83244ebb
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_mathematics"
+"description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_mathematics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_microeconomics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_microeconomics.yaml
new file mode 100644
index 00000000..982f3f08
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_microeconomics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_microeconomics"
+"description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_microeconomics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_physics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_physics.yaml
new file mode 100644
index 00000000..25c32369
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_physics"
+"description": "The following are multiple choice questions (with answers) about high school physics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_physics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_psychology.yaml
new file mode 100644
index 00000000..a6e431db
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_psychology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_psychology"
+"description": "The following are multiple choice questions (with answers) about high school psychology.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_psychology"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_statistics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_statistics.yaml
new file mode 100644
index 00000000..fa9075f5
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_statistics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_statistics"
+"description": "The following are multiple choice questions (with answers) about high school statistics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_statistics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_us_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_us_history.yaml
new file mode 100644
index 00000000..094f95d0
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_us_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_us_history"
+"description": "The following are multiple choice questions (with answers) about high school us history.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_us_history"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_world_history.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_world_history.yaml
new file mode 100644
index 00000000..6ffd6d08
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_high_school_world_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_world_history"
+"description": "The following are multiple choice questions (with answers) about high school world history.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_world_history"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_aging.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_aging.yaml
new file mode 100644
index 00000000..d70d5e85
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_aging.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "human_aging"
+"description": "The following are multiple choice questions (with answers) about human aging.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_human_aging"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_sexuality.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_sexuality.yaml
new file mode 100644
index 00000000..39751188
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_human_sexuality.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "human_sexuality"
+"description": "The following are multiple choice questions (with answers) about human sexuality.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_human_sexuality"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_international_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_international_law.yaml
new file mode 100644
index 00000000..03fab6ef
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_international_law.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "international_law"
+"description": "The following are multiple choice questions (with answers) about international law.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_international_law"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_jurisprudence.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_jurisprudence.yaml
new file mode 100644
index 00000000..bb6bfc6f
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_jurisprudence.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "jurisprudence"
+"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_jurisprudence"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_logical_fallacies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_logical_fallacies.yaml
new file mode 100644
index 00000000..d57576cd
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_logical_fallacies.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "logical_fallacies"
+"description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_logical_fallacies"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_machine_learning.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_machine_learning.yaml
new file mode 100644
index 00000000..2c586922
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_machine_learning.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "machine_learning"
+"description": "The following are multiple choice questions (with answers) about machine learning.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_machine_learning"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_management.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_management.yaml
new file mode 100644
index 00000000..66b14f7f
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_management.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "management"
+"description": "The following are multiple choice questions (with answers) about management.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_management"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_marketing.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_marketing.yaml
new file mode 100644
index 00000000..aacee467
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_marketing.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "marketing"
+"description": "The following are multiple choice questions (with answers) about marketing.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_marketing"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_medical_genetics.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_medical_genetics.yaml
new file mode 100644
index 00000000..72d607fb
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_medical_genetics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "medical_genetics"
+"description": "The following are multiple choice questions (with answers) about medical genetics.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_medical_genetics"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_miscellaneous.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_miscellaneous.yaml
new file mode 100644
index 00000000..14db1ba8
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_miscellaneous.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "miscellaneous"
+"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_miscellaneous"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_disputes.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_disputes.yaml
new file mode 100644
index 00000000..0beccf44
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_disputes.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "moral_disputes"
+"description": "The following are multiple choice questions (with answers) about moral disputes.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_moral_disputes"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_scenarios.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_scenarios.yaml
new file mode 100644
index 00000000..4d884b63
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_moral_scenarios.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "moral_scenarios"
+"description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_moral_scenarios"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_nutrition.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_nutrition.yaml
new file mode 100644
index 00000000..ba1fdf61
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_nutrition.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "nutrition"
+"description": "The following are multiple choice questions (with answers) about nutrition.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_nutrition"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_philosophy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_philosophy.yaml
new file mode 100644
index 00000000..21645e77
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_philosophy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "philosophy"
+"description": "The following are multiple choice questions (with answers) about philosophy.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_philosophy"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_prehistory.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_prehistory.yaml
new file mode 100644
index 00000000..74d9f30c
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_prehistory.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "prehistory"
+"description": "The following are multiple choice questions (with answers) about prehistory.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_prehistory"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_accounting.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_accounting.yaml
new file mode 100644
index 00000000..9010995f
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_accounting.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_accounting"
+"description": "The following are multiple choice questions (with answers) about professional accounting.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_professional_accounting"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_law.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_law.yaml
new file mode 100644
index 00000000..15fdad65
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_law.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_law"
+"description": "The following are multiple choice questions (with answers) about professional law.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_professional_law"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_medicine.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_medicine.yaml
new file mode 100644
index 00000000..1bcc6a9a
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_medicine.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_medicine"
+"description": "The following are multiple choice questions (with answers) about professional medicine.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_professional_medicine"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_psychology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_psychology.yaml
new file mode 100644
index 00000000..9144805c
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_professional_psychology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_psychology"
+"description": "The following are multiple choice questions (with answers) about professional psychology.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_professional_psychology"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_public_relations.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_public_relations.yaml
new file mode 100644
index 00000000..0b4adc04
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_public_relations.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "public_relations"
+"description": "The following are multiple choice questions (with answers) about public relations.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_public_relations"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_security_studies.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_security_studies.yaml
new file mode 100644
index 00000000..2f4178f0
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_security_studies.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "security_studies"
+"description": "The following are multiple choice questions (with answers) about security studies.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_security_studies"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_sociology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_sociology.yaml
new file mode 100644
index 00000000..c583cf24
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_sociology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "sociology"
+"description": "The following are multiple choice questions (with answers) about sociology.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_sociology"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_us_foreign_policy.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_us_foreign_policy.yaml
new file mode 100644
index 00000000..f41d3c27
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_us_foreign_policy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "us_foreign_policy"
+"description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_us_foreign_policy"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_virology.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_virology.yaml
new file mode 100644
index 00000000..c2cafd9b
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_virology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "virology"
+"description": "The following are multiple choice questions (with answers) about virology.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_virology"
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_world_religions.yaml b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_world_religions.yaml
new file mode 100644
index 00000000..b1d1de0f
--- /dev/null
+++ b/lm_eval/tasks/mmlu/flan_n_shot/mmlu_log_world_religions.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "world_religions"
+"description": "The following are multiple choice questions (with answers) about world religions.\n\n"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_world_religions"
--
GitLab


From 4f5b72bc21249c267e19667e057b48b6f34f82bc Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Mon, 4 Sep 2023 12:22:14 +0000
Subject: [PATCH 111/264] fixed template

---
 lm_eval/tasks/bbh/_generate_configs.py | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml | 2 +-
 .../bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml | 2 +-
 .../bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml | 2 +-
 .../bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml | 2 +-
 .../tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml | 2 +-
 .../bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml | 2 +-
 .../flan_cot_fewshot/salient_translation_error_detection.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml | 2 +-
 .../tracking_shuffled_objects_five_objects.yaml | 2 +-
 .../tracking_shuffled_objects_seven_objects.yaml | 2 +-
 .../tracking_shuffled_objects_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml | 2 +-
 .../bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml | 2 +-
 .../flan_cot_zeroshot/logical_deduction_seven_objects.yaml | 2 +-
 .../flan_cot_zeroshot/logical_deduction_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml | 2 +-
 .../tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml | 2 +-
 .../flan_cot_zeroshot/reasoning_about_colored_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml | 2 +-
 .../salient_translation_error_detection.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml | 2 +-
 .../tracking_shuffled_objects_five_objects.yaml | 2 +-
 .../tracking_shuffled_objects_seven_objects.yaml | 2 +-
 .../tracking_shuffled_objects_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml | 2 +-
 .../bbh/flan_fewshot/logical_deduction_five_objects.yaml | 2 +-
 .../bbh/flan_fewshot/logical_deduction_seven_objects.yaml | 2 +-
 .../bbh/flan_fewshot/logical_deduction_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/navigate.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml | 2 +-
 .../bbh/flan_fewshot/reasoning_about_colored_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml | 2 +-
 .../bbh/flan_fewshot/salient_translation_error_detection.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/snarks.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml | 2 +-
 .../flan_fewshot/tracking_shuffled_objects_five_objects.yaml | 2 +-
 .../flan_fewshot/tracking_shuffled_objects_seven_objects.yaml | 2 +-
 .../flan_fewshot/tracking_shuffled_objects_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml | 2 +-
 .../bbh/flan_zeroshot/logical_deduction_five_objects.yaml | 2 +-
 .../bbh/flan_zeroshot/logical_deduction_seven_objects.yaml | 2 +-
 .../bbh/flan_zeroshot/logical_deduction_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml | 2 +-
 .../bbh/flan_zeroshot/reasoning_about_colored_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml | 2 +-
 .../flan_zeroshot/salient_translation_error_detection.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml | 4 ++--
 lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml | 2 +-
 .../flan_zeroshot/tracking_shuffled_objects_five_objects.yaml | 2 +-
 .../tracking_shuffled_objects_seven_objects.yaml | 2 +-
 .../tracking_shuffled_objects_three_objects.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml | 2 +-
 lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml | 2 +-
 109 files changed, 110 insertions(+), 110 deletions(-)

diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py
index 8d805a64..ae2fb38f 100644
--- a/lm_eval/tasks/bbh/_generate_configs.py
+++ b/lm_eval/tasks/bbh/_generate_configs.py
@@ -68,7 +68,7 @@ if __name__ == "__main__":
             doc_to_text = doc_to_text + " Let's think step by step.\n"
 
         yaml_dict = {
-            "include": "_template_yaml",
+            "include": base_yaml_name,
            "task": f"bbh_{args.task_prefix}_{task}",
             "dataset_name": task,
             "description": description+"\n\n",
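The one-line change above is the whole fix: the generator previously hardcoded `"include": "_template_yaml"` into every emitted config, and now writes whatever base template it was generating against (`base_yaml_name`), e.g. `_flan_cot_fewshot_template_yaml`. The 108 YAML diffs that follow (27 tasks across 4 prompt variants) are the regenerated output. As a rough illustration of why that value matters, here is a minimal sketch of how an `include` key can be resolved at load time; it assumes PyYAML and is not the harness's actual config loader.

```python
# Minimal include-resolution sketch (illustrative, not lm-eval's loader):
# the "include" value must name a real template file next to the config,
# which is why each variant's configs should point at their own base yaml.
import os
import yaml

def load_task_config(path: str) -> dict:
    with open(path) as f:
        config = yaml.safe_load(f)
    base_name = config.pop("include", None)
    if base_name is not None:
        # Resolve the template relative to the including file, e.g.
        # flan_cot_fewshot/_flan_cot_fewshot_template_yaml.
        base_path = os.path.join(os.path.dirname(path), base_name)
        with open(base_path) as f:
            merged = yaml.safe_load(f)
        merged.update(config)  # keys in the child config override the template
        config = merged
    return config
```

Under a scheme like this, every `-"include": "_template_yaml"` / `+"include": "_flan_cot_fewshot_template_yaml"` pair below simply repoints a generated config at the variant-specific template named by the fixed generator.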
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml
index 849e0435..87f6e5af 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/boolean_expressions.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "boolean_expressions"
 "description": "Evaluate the result of a random Boolean expression.\n\n"
 "doc_to_text": " not ( ( not not True ) ) is\nA: Let's think step by step.\nRemember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = not ( ( not not True ) ) = not ( ( A ) )\" where \"A = not not True\".\nLet's evaluate A: A = not not True = not (not True) = not False = True.\nPlugging in A, we get: Z = not ( ( A ) ) = not ( ( True ) ) = not True = False. So the answer is False. True and False and not True and True is\nA: Let's think step by step.\nRemember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = True and False and not True and True = A and B\" where \"A = True and False\" and \"B = not True and True\".\nLet's evaluate A: A = True and False = False.\nLet's evaluate B: B = not True and True = not (True and True) = not (True) = False.\nPlugging in A and B, we get: Z = A and B = False and False = False. So the answer is False. not not ( not ( False ) ) is\nA: Let's think step by step.\nRemember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = not not ( not ( False ) ) = not not ( A )\" where \"A = not ( False )\".\nLet's evaluate A: A = not ( False ) = not False = True.\nPlugging in A, we get: Z = not not ( A ) = not not (True) = not not False = True. So the answer is True.Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_boolean_expressions"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml
index e7e80969..420320bc 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/causal_judgement.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "causal_judgement"
 "description": "Answer questions about causal attribution.\n\n"
 "doc_to_text": " How would a typical person answer each of the following questions about causation?\nFrank T., had an ongoing dispute with his neighbor over a stretch of land and one day decided to shoot his neighbor in the body. Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild. Nonetheless, the bullet bounced off a large boulder several feet away and hit the neighbor's body, causing significant injury. Did Frank T. intentionally shoot his neighbor in the body?\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nHere in this question, we are told that \"Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild.\" A typical person would assume that this passage suggests that Frank T. had no intention of shooting and injuring someone and that the bullet accidentally hit the neighbor's body; therefore, we conclude that Frank T. did not intentionally hit his neighbor. So the answer is No. How would a typical person answer each of the following questions about causation?\nSuzy and Billy are working on a project that is very important for our nation's security. The boss tells them both: \"Be sure that you are here at exactly 9 am. It is absolutely essential that you arrive at that time.\" Both Billy and Suzy arrive at 9 am. As it happens, there was a motion detector installed in the room where they arrived. The motion detector was set up to be triggered if at least one person appeared in the room at the same time. So the motion detector went off. Did Billy cause the motion detector to go off?\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nHere in this question, we are told that the boss ordered them both to arrive at the meeting room at the same time and that the motion detector was set up to be triggered if at least one person appeared in the room at the same time.\" A typical person would assume that the person probably meant to say the detector was set up to be triggered if \"both persons\" appeared in the room at the same time, not at least one person, since otherwise the phrase \"at the same time\" would not make much sense in that sentence. Because the motion detector went off, a typical person would therefore come to the conclusion that both Suzy and Billy triggered the motion detector to go off; hence, Billy did indeed cause the motion detector to go off. So the answer is Yes. How would a typical person answer each of the following questions about causation?\nGeorge and his sister Lena reunite at their parents' house for Thanksgiving. Whereas George just got into medical school, Lena is unhappy in her marriage and recently lost her job. Over the course of the day, George and Lena get into a number of heated arguments. Later in the afternoon they play a game of darts. They split the first two games, and the third game is close until the end. Who will win comes down to George's last shot. If he hits a high point region, he wins; if he hits a low point region, Lena wins. George thinks of the difficult time Lena is having, and he really wants to let her win. He aims the dart at the low point region. He sets up his shot and the dart lands in the low point region. After his shot, Lena wins the game and is very happy. Did George hit the low point region intentionally?\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nHere in this question, we are told that \"He aims the dart at the low point region.\" A typical person might therefore think George did intentionally hit the low point region, because he wanted to lift up the spirit of his sister Lena. So the answer is Yes.Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_causal_judgement"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml
index d5dc2117..c88d8fe0 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/date_understanding.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "date_understanding"
 "description": "Infer the date from context.\n\n"
 "doc_to_text": " Today is Christmas Eve of 1937. What is the date 10 days ago in MM/DD/YYYY?\nOptions:\n(A) 12/14/2026\n(B) 12/14/1950\n(C) 12/14/2007\n(D) 12/14/1937\n(E) 07/14/1938\n(F) 12/14/1988\nA: Let's think step by step.\nIf today is Christmas Eve of 1937, then today's date is December 24, 1937. 10 days before today is December 14, 1937, that is 12/14/1937. So the answer is (D). Tomorrow is 11/12/2019. What is the date one year ago from today in MM/DD/YYYY?\nOptions:\n(A) 09/04/2018\n(B) 11/11/2018\n(C) 08/25/2018\n(D) 11/02/2018\n(E) 11/04/2018\nA: Let's think step by step.\nIf tomorrow is 11/12/2019, then today is 11/11/2019. The date one year ago from today is 11/11/2018. So the answer is (B). Jane and John married on Jan 2, 1958. It is their 5-year anniversary today. What is the date tomorrow in MM/DD/YYYY?\nOptions:\n(A) 01/11/1961\n(B) 01/03/1963\n(C) 01/18/1961\n(D) 10/14/1960\n(E) 01/03/1982\n(F) 12/03/1960\nA: Let's think step by step.\nIf Jane and John married on Jan 2, 1958, then and if it is their 5-year anniversary today, then today's date is Jan 2, 1963. The date tomorrow is Jan 3, 1963, that is 01/03/1963. So the answer is (B).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_date_understanding"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml
index 2d08474c..8dc5db9e 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/disambiguation_qa.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "disambiguation_qa"
 "description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n"
 "doc_to_text": " In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The chief told the counselor that they took the day off.\nOptions:\n(A) The chief took the day off\n(B) The counselor took the day off\n(C) Ambiguous\nA: Let's think step by step.\nHere we need to determine who the pronoun \"they\" might be referring to. There are two possible referents for \"they\", namely the chief and the counselor. The verb \"told\" might be able to help us determine which one is more likely (if either). Let X be the chief and Y the counselor. The sentence is then of the form \"X told Y that (X or Y) did something.\"\nLet's consider Y first: \"X told Y that Y did something.\" This case does not make much sense, as Y would already have the information that Y did something, because it is information about themself.\nNow, consider X: \"X told Y that X did something.\" This makes sense, because X would be sharing some information about themself that Y might not have known before.\nBecause in this context, X is the chief and Y is the counselor, the answer should be the chief. So the answer is (A). In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The manager sent a message to the secretary, but he didn't reply yet.\nOptions:\n(A) The secretary didn't reply yet\n(B) The manager didn't reply yet\n(C) Ambiguous\nA: Let's think step by step.\nHere we need to determine who the pronoun \"he\" might be referring to. There are two possible referents for \"he\", namely the manager and the secretary. The verbs \"sent\" and \"reply\" might be able to help us determine which one is more likely (if either). Let X be the manager and Y the secretary. The sentence is then of the form \"X sent a message to Y, but (X or Y) didn't reply yet.\"\nLet's consider Y first: \"X sent a message to Y, but Y didn't reply yet.\" This case makes sense, because of the implicit causality of the sentence. Y was the receiver of the message, but Y didn't get back to X yet.\nNow, consider X: \"X sent a message to Y, but X didn't reply yet.\" This case doesn't make sense, because X was the initial sender of the message, so it is now Y's turn to write back to X.\nBecause in this context, X is the manager and Y is the secretary, the answer should be the secretary. So the answer is (A). In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: Bailey will plan to meet the director at his office\nOptions:\n(A) It will be Bailey's office\n(B) It will be the director's office\n(C) Ambiguous\nA: Let's think step by step.\nHere we need to determine who the pronoun \"his\" might be referring to. There are two possible referents for \"his\", namely Bailey's and the director's. The verb phrase \"plan to meet\" might be able to help us determine which one is more likely (if either). Let X be Bailey and Y the director. The sentence is then of the form \"X will plan to meet Y at (X or Y)'s office.\"\nLet's consider Y first: \"X will plan to meet Y at Y's office.\" This case makes sense, because X might want to meet up with Y at Y's office.\nNow, consider X: \"X will plan to meet Y at X's office.\" This case also makes sense, because X might want to meet up with Y at X's own office.\nBecause both X and Y are possible at the same time, we conclude that the antecedent of the pronoun is ambiguous. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_disambiguation_qa"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml
index f15bda16..f82f70db 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/dyck_languages.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "dyck_languages"
 "description": "Correctly close a Dyck-n word.\n\n"
 "doc_to_text": " Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: [ { [\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: [ ; stack: [\n2: { ; stack: [ {\n3: [ ; stack: [ { [\nNow, we have reached the end. The final stack is \"[ { [\".\nWe will need to pop out \"[\", \"{\", \"[\" one by one in that order.\nSo, we need \"]\", \"}\", \"]\". So the answer is ] } ]. Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: < ; stack: <\n2: > ; stack: empty\n3: ( ; stack: (\n4: ( ; stack: ( (\n5: [ ; stack: ( ( [\n6: [ ; stack: ( ( [ [\n7: ( ; stack: ( ( [ [ (\n8: { ; stack: ( ( [ [ ( {\n9: } ; stack: ( ( [ [ (\n10: ) ; stack: ( ( [ [\n11: [ ; stack: ( ( [ [ [\n12: < ; stack: ( ( [ [ [ <\n13: > ; stack: ( ( [ [ [\n14: ] ; stack: ( ( [ [\n15: ] ; stack: ( ( [\nNow, we have reached the end. The final stack is \"( ( [\".\nWe will need to pop out \"[\", \"(\", \"(\" one by one in that order.\nSo, we need \"]\", \")\", \")\". So the answer is ] ) ). Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < > ] > }\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: < ; stack: <\n2: [ ; stack: < [\n3: < ; stack: < [ <\n4: [ ; stack: < [ < [\n5: { ; stack: < [ < [ {\n6: < ; stack: < [ < [ { <\n7: [ ; stack: < [ < [ { < [\n8: ] ; stack: < [ < [ { <\n9: < ; stack: < [ < [ { < <\n10: { ; stack: < [ < [ { < < {\n11: } ; stack: < [ < [ { < <\n12: > ; stack: < [ < [ { <\n13: > ; stack: < [ < [ {\n14: } ; stack: < [ < [\n15: ] ; stack: < [ <\n16: > ; stack: < [\n17: { ; stack: < [ {\n18: { ; stack: < [ { {\n19: ( ; stack: < [ { { (\n20: ) ; stack: < [ { {\n21: } ; stack: < [ {\n22: { ; stack: < [ { {\n23: < ; stack: < [ { { <\n24: [ ; stack: < [ { { < [\n25: < ; stack: < [ { { < [ <\n26: > ; stack: < [ { { < [\n27: ] ; stack: < [ { { <\n28: > ; stack: < [ { {\n29: } ; stack: < [ {\nNow, we have reached the end. The final stack is \"< [ {\".\nWe will need to pop out \"{\", \"[\", \"<\" one by one in that order.\nSo, we need \"}\", \"]\", \">\". So the answer is } ] >.Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_dyck_languages"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml
index 7e28e84e..bbace29e 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/formal_fallacies.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "formal_fallacies"
 "description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
 "doc_to_text": " \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: To begin with, Lesley is a close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy. It follows that Lesley is a great-grandfather of Leroy.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Lesley is a close friend of Fernando: Lesley = friend(Fernando).\n(2) Being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy: If X = friend(Fernando) OR SCHOOLMATE(Lowell), then X = great-grandfather(Leroy).\nHypothesis: Does it follow that Lesley is a great-grandfather of Leroy: Lesley = great-grandfather(Leroy)?\nLet’s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (1), we have Lesley = friend(Fernando). By (2), we have if Lesley = friend(Fernando), then Lesley = great-grandfather(Leroy).\nSo, it is true that Lesley is a great-grandfather of Leroy. So the answer is valid. \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: Whoever is not a great-grandfather of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde. We may conclude: Everyone who is an ancestor of Dana is a stepbrother of Brian, too.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Whoever is not a great-grandfather of Clyde is a stepbrother of Brian: If X = NOT (great-grandfather(Clyde)), then X = stepbrother(Brian).\n(2): Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde: If X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\nHypothesis: Does it follow that everyone who is an ancestor of Dana is a stepbrother of Brian, too: If X = ancestor(Dana), then X = stepbrother(Brian)?\nLet’s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (2), we have if X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\nFurthermore, by (1), we have if X = NOT (great-grandfather(Clyde)), then X = stepbrother(Brian).\nBy the transitive relation rule in first-order logic, we then have: if X = ancestor(Dana), then X = stepbrother(Brian).\nSo, it is true that everyone who is an ancestor of Dana is a stepbrother of Brian. So the answer is valid. \"It is not always easy to grasp who is consuming which products. The following argument pertains to this question: Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and, in the same time, a loyal buyer of Caress soap. It follows that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both: If X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin) OR X = loyal-buyer(Caress).\n(2): No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and a loyal buyer of Caress soap at the same time. If X = regular-consumer(Lush), then X = NOT (rare-consumer(Nioxin) AND loyal-buyer(Caress)).\nHypothesis: Does it follow that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap: If X = infrequent-user(Paul Mitchell), then X = NOT (regular-consumer(Lush))?\nLet’s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (1), we have if X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin) OR X = loyal-buyer(Caress). We need to consider both cases separately:\nThe case X = rare-consumer(Nioxin) does not appear in (2).\nThe case X = loyal-buyer(Caress) does not appear in (2), either.\nSo, from (1) and (2), we cannot necessarily deduce the Hypothesis. So the answer is invalid.Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_formal_fallacies"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml
index 07844a53..6c3141aa 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/geometric_shapes.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "geometric_shapes"
 "description": "Name geometric shapes from their SVG paths.\n\n"
 "doc_to_text": " This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 9 separate commands.\n(1) M 31.00,73.00: Move the current point to 31.00,73.00.\n(2) L 32.00,59.00: Create a line from 31.00,73.00 to 32.00,59.00.\n(3) L 44.00,50.00: Create a line from 32.00,59.00 to 44.00,50.00.\n(4) L 49.00,41.00: Create a line from 44.00,50.00 to 49.00,41.00.\n(5) L 64.00,37.00: Create a line from 49.00,41.00 to 64.00,37.00.\n(6) L 71.00,55.00: Create a line from 64.00,37.00 to 71.00,55.00.\n(7) L 64.00,76.00: Create a line from 71.00,55.00 to 64.00,76.00.\n(8) L 52.00,61.00: Create a line from 64.00,76.00 to 52.00,61.00.\n(9) L 31.00,73.00: Create a line from 52.00,61.00 to 31.00,73.00.\nThis SVG path starts at point 31.00,73.00, creates eight consecutive and touching lines, and then returns back its starting point, thereby creating an eight-sided shape. It does not have any curves or arches. \"octagon\" is the only eight-sided object on the list. So the answer is (F). This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 6 separate commands.\n(1) M 14.19,26.04: Move the current point to 14.19,26.04.\n(2) L 51.43,39.21: Create a line from 14.19,26.04 to 51.43,39.21.\n(3) L 58.44,36.69: Create a line from 51.43,39.21 to 58.44,36.69.\n(4) L 56.63,30.17: Create a line from 58.44,36.69 to 56.63,30.17.\n(5) L 48.53,26.66: Create a line from 56.63,30.17 to 48.53,26.66.\n(6) L 14.19,26.04: Create a line from 48.53,26.66 to 14.19,26.04.\nThis SVG path starts at point 14.19,26.04, creates five consecutive and touching lines, and then returns back its starting point, thereby creating a five-sided shape. It does not have any curves or arches. \"pentagon\" is the only five-sided polygon on the list. So the answer is (G). This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 5 separate commands.\n(1) M 41.00,43.00: Move the current point to 41.00,43.00.\n(2) L 37.00,34.00: Create a line from 41.00,43.00 to 37.00,34.00.\n(3) L 41.00,33.00: Create a line from 37.00,34.00 to 41.00,33.00.\n(4) L 45.00,34.00: Create a line from 41.00,33.00 to 45.00,34.00.\n(5) L 41.00,43.00: Create a line from 45.00,34.00 to 41.00,43.00.\nThis SVG path starts at point 41.00,43.00, creates four consecutive and touching lines, and then returns back its starting point, thereby creating a four-sided shape. \"kite\" and \"rectangle\" are the only two four-sided polygons on the list. So, we need to determine which one is the correct answer.\nA kite has two pairs of equal-length adjacent sides, whereas a rectangle has two pairs of equal-length alternate (opposite) sides. Now, let's check whether the two adjacent sides of this shape are equal.\nLength of side A: |A| = sqrt((41.00-37.00)^2 + (43.00-34.00)^2) = sqrt((4)^2 + (9)^2) = sqrt(16 + 81) = sqrt(97).\nLength of side B: |B| = sqrt((37.00-41.00)^2 + (34.00-33.00)^2)) = sqrt((4)^2 + (1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side C: |C| = sqrt((41.00-45.00)^2 + (33.00-34.00)^2)) = sqrt((-4)^2 + (-1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side D: |D| = sqrt((45.00-41.00)^2 + (34.00-43.00)^2)) = sqrt((4)^2 + (-9)^2) = sqrt(16 + 81) = sqrt(97).\nNote that |A| = |D| and |B| = |C|. Furthermore, A and D are adjacent and B and C are adjacent. Thus, this polygon has two pairs of equal-length adjacent sides and is \"kite\". So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_geometric_shapes"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml
index 2dc997cc..f371f45c 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/hyperbaton.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "hyperbaton"
 "description": "Order adjectives correctly in English sentences.\n\n"
 "doc_to_text": " Which sentence has the correct adjective order:\nOptions:\n(A) rubber terrible ship\n(B) terrible rubber ship\nA: Let's think step by step.\nWhen there is more than one adjective before a noun, the adjectives need to respect the following order before a noun: \"[1. opinion] [2. size] [3. age] [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun\".\nOption (A): \"rubber terrible ship\". (1) rubber\" falls into the material category. (2) \"terrible\" falls into the opinion category. Option (A) has the following adjective order: [7. material] [1. opinion] (or, in numeric terms, 7 1). Because 7 < 1 is not correct, (A) does not have the correct ordering.\nOption (B): \"terrible rubber ship\". Option (B) has the following adjective order: [1. opinion] [7. material] (or, in numeric terms, 1 7). Because 1 < 7 is correct, (B) has the correct ordering. So the answer is (B). Which sentence has the correct adjective order:\nOptions:\n(A) repulsive small Brazilian exercise ship\n(B) Brazilian repulsive exercise small ship\nA: Let's think step by step.\nWhen there is more than one adjective before a noun, the adjectives need to respect the following order before a noun: \"[1. opinion] [2. size] [3. age] [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun\".\nOption (A): \"repulsive small Brazilian exercise ship\". (1) \"repulsive\" falls into the opinion category. (2) \"small\" falls into the size category. (3) \"Brazilian\" falls into the origin category. (4) \"exercise\" falls into the purpose category. Option (A) has the following adjective order: [1. opinion] [2. size] [6. origin] [8. purpose] (or, in numeric terms, 1 2 6 8). Because 1 < 2 < 6 < 8 is correct, (A) has the correct ordering.\nOption (B): \"Brazilian repulsive exercise small ship\". Option (B) has the following adjective order: [6. origin] [1. opinion] [8. purpose] [2. size] (or, in numeric terms, 6 1 8 2). Because 6 < 1 < 8 < 2 is not correct, (B) does not have the correct ordering. So the answer is (A). Which sentence has the correct adjective order:\nOptions:\n(A) blue gold wonderful square shoe\n(B) wonderful square blue gold shoe\nA: Let's think step by step.\nWhen there is more than one adjective before a noun, the adjectives need to respect the following order before a noun: \"[1. opinion] [2. size] [3. age] [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun\".\nOption (A): \"blue gold wonderful square shoe\". (1) \"blue\" falls into the color category. (2) \"gold\" falls into the material category. (3) \"wonderful\" falls into the opinion category. (4) \"square\" falls into the shape category. The adjective order that Option (A) has is [5. color] [7. material] [1. opinion] [4. shape] (or, in numeric terms, 5 7 1 4). Because 5 < 7 < 1 < 4 is not correct, (A) does not have the correct ordering.\nOption (B): \"wonderful square blue gold shoe\". Option (B) has the following adjective order: [1. opinion] [4. shape] [5. color] [7. material] (or, in numeric terms, 1 4 5 7 ). Because 1 < 4 < 5 < 7 is correct, (B) has the correct ordering. So the answer is (B).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_hyperbaton"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml
index b54bac3b..7e6fe33c 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_five_objects.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "logical_deduction_five_objects"
 "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 "doc_to_text": " The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_logical_deduction_five_objects"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml
index f4799ae6..9f04d580 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_seven_objects.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "logical_deduction_seven_objects"
 "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 "doc_to_text": " The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost.
So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml index cbc0dbd1..4affd9b7 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/logical_deduction_three_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_three_objects" "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" "doc_to_text": " The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A). The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. 
So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml index 573b3222..4ba1454b 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/movie_recommendation.yaml @@ -1,5 +1,5 @@ "dataset_name": "movie_recommendation" "description": "Recommend movies similar to the given list of movies.\n\n" "doc_to_text": " Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big Lebowski:\nOptions:\n(A) Tetsuo\n(B) the Ironman\n(C) The Princess Bride\n(D) The Barkley Marathons The Race That Eats Its Young\n(E) Bug\nA: Let's think step by step.\n- Star Wars Episode IV - A New Hope (action, adventure, fantasy; 1977)\n- Indiana Jones and the Last Crusade (action, adventure; 1989)\n- Star Wars Episode V - The Empire Strikes Back (action, adventure, fantasy; 1980)\n- The Big Lebowski (action, drama, comedy; 1998)\nThese are all famous classic American movies produced before 2000. Amongst all the options, the only movie similar to these ones seems to be The Princess Bride (1987). So the answer is (C). Find a movie similar to Twister, The Silence of the Lambs, Independence Day, Braveheart:\nOptions:\n(A) They Shoot Horses\n(B) Don't They\n(C) Forrest Gump\n(D) The Salton Sea\n(E) Extreme Days\nA: Let's think step by step.\n- Twister (action, adventure, thriller; 1996)\n- The Silence of the Lambs (crime, drama, thriller; 1991)\n- Independence Day (action, science-fiction, drama; 1996)\n- Braveheart (biography, drama, epic; 1995)\nThese are all famous Hollywood movies produced around the 1990s. Amongst all the options, the only movie similar to these ones seems to be Forrest Gump (comedy, drama, romance; 1994). So the answer is (C). Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest Gump:\nOptions:\n(A) Phenomena\n(B) Lilting\n(C) Catwoman\n(D) Edge of Tomorrow\nA: Let's think step by step.\n- Minority Report (action, crime, mystery; 2002)\n- Total Recall (action, adventure, science-fiction; 2012)\n- Inside Out (animation, family, comedy; 2015)\n- Forrest Gump (comedy, drama, romance; 1994)\nThese are all famous movies produced in the past few decades.Amongst all the options, the only movie similar to these ones seems to be Edge of Tomorrow (action, adventure, crime, mystery; 2014), as it is also a science-fiction movie and features Tom Cruise. 
So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml index 21f34e7a..f92d96e7 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/multistep_arithmetic_two.yaml @@ -1,5 +1,5 @@ "dataset_name": "multistep_arithmetic_two" "description": "Solve multi-step arithmetic problems.\n\n" "doc_to_text": " ((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5)) =\nA: Let's think step by step.\nLet’s recall that the order of operations in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication and division (from left to right), (4) addition and multiplication (from left to right). So, remember to always compute the expressions inside parentheses or brackets first.\nThis equation can be written as \"A * B\", where A = (-5 + 9 * -4 - 0) and B = (4 + -7 + 0 * -5).\nLet's calculate A = (-5 + 9 * -4 - 0) = (-5 + (9 * -4) - 0) = (-5 + (-36) - 0) = (-5 + -36 - 0) = -5 - 36 = -41.\nLet's calculate B = (4 + -7 + 0 * -5) = (4 + -7 + (0 * -5)) = (4 + -7 + 0) = (4 + -7) = (4 - 7) = -3.\nThen, the final equation is A * B = -41 * -3 = (-61) * (-3) = 123. So the answer is 123. ((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4)) =\nA: Let's think step by step.\nLet’s recall that the order of operations in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication and division (from left to right), (4) addition and multiplication (from left to right). So, remember to always compute the expressions inside parentheses or brackets first.\nThis equation can be written as \"A + B\", where A = (-9 * 7 * 7 * -9) and B = (4 * -9 - 8 - -4).\nLet's calculate A = (-9 * 7 * 7 * -9) = ((-9 * 7) * (7 * -9)) = ((-63) * (-63)) = 3969.\nLet's calculate B = (4 * -9 - 8 - (-4)) = ((4 * -9) - 8 - (-4)) = ((-36) - 8 - (-4)) = ((-36 - 8) - (-4)) = (-44 - (-4)) = -40.\nThen, the final equation is A + B = 3969 + -40 = 3969 - 40 = 3929. So the answer is 3929. ((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9)) =\nA: Let's think step by step.\nLet’s recall that the order of operations in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication and division (from left to right), (4) addition and multiplication (from left to right). So, remember to always compute the expressions inside parentheses or brackets first.\nThis equation can be written as \"A - B\", where A = (-3 + 5 * 8 * -4) and B = (9 - 8 * -7 + -9).\nLet's calculate A = (-3 + 5 * 8 * -4) = (-3 + (5 * 8) * -4) = (-3 + (40) * -4) = (-3 + (40 * -4)) = (-3 + -160) = -163.\nLet's calculate B = (9 - 8 * -7 + -9) = (9 - (8 * -7) + -9) = (9 - (-56) + -9) = ((9 - (-56)) + -9) = ((65) + -9)= (65 - 9) = 56.\nThen, the final equation is A - B = -163 - 56 = -219. 
So the answer is -219.Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml index be3cd234..37923249 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/navigate.yaml @@ -1,5 +1,5 @@ "dataset_name": "navigate" "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" "doc_to_text": " If you follow these instructions, do you return to the starting point? Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps. Take 8 steps.\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nWe start at the origin (0, 0), facing the positive y-axis.\n(1) Turn left: (0, 0), facing the negative x-axis.\n(2) Turn around: (0, 0), facing the positive x-axis.\n(3) Turn left: (0, 0), facing the positive y-axis.\n(4) Take 7 steps: (0, 7), facing the positive y-axis.\n(5) Take 2 steps: (0, 9), facing the positive y-axis.\n(6) Take 4 steps: (0, 13), facing the positive y-axis.\n(7) Take 8 steps: (0, 21), facing the positive y-axis.\nSince (0, 21) is not (0, 0), we are not where we started. So the answer is No. If you follow these instructions, do you return to the starting point? Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps. Take 1 step.\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nWe start at the origin (0, 0), facing the positive y-axis.\n(1) Turn around: (0, 0), facing the negative y-axis.\n(2) Take 1 step: (0, -1), facing the negative y-axis.\n(3) Take 6 steps: (0, -7), facing the negative y-axis.\n(4) Turn around: (0, -7), facing the positive y-axis.\n(5) Take 6 steps: (0, -1), facing the positive y-axis.\n(6) Take 9 steps: (0, 8), facing the positive y-axis.\n(7) Take 1 step: (0, 9), facing the positive y-axis.\nSince (0, 9) is not (0, 0), we are not where we started. So the answer is No. If you follow these instructions, do you return to the starting point? Always face forward. Take 2 steps right. Take 9 steps left. Take 7 steps right.\nOptions:\n- Yes\n- No\nA: Let's think step by step.\nWe start at the origin (0, 0), facing the positive y-axis.\n(1) Always face forward: (0, 0), facing the positive y-axis.\n(2) Take 2 steps right: (0, 2), facing the positive y-axis.\n(3) Take 9 steps left: (0, -7), facing the positive y-axis.\n(4) Take 7 steps right: (0, 7), facing the positive y-axis.\nSince (0, 0) is (0, 0), we are indeed where we started. So the answer is Yes.Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml index 767d414f..e6f2635b 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/object_counting.yaml @@ -1,5 +1,5 @@ "dataset_name": "object_counting" "description": "Questions that involve enumerating objects and asking the model to count them.\n\n" "doc_to_text": " I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana, a flute, an orange, and a violin. 
How many fruits do I have?\nA: Let's think step by step.\nWe first identify the fruits on the list and include their quantity in parentheses:\n- blackberry (1)\n- nectarine (1)\n- plum (1)\n- strawberry (1)\n- banana (1)\n- orange (1)\nNow, let's add the numbers in parentheses: 1 + 1 + 1 + 1 + 1 + 1 = 6. So the answer is 6. I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape, a nectarine, and three plums. How many fruits do I have?\nA: Let's think step by step.\nWe first identify the fruits on the list and include their quantity in parentheses:\n- orange (1)\n- raspberry (1)\n- peaches (2)\n- blackberry (1)\n- apple (1)\n- grape (1)\n- nectarine (1)\n- plums (3)\nNow, let's add the numbers in parentheses: 1 + 1 + 2 + 1 + 1 + 1 + 1 + 3 = 11. So the answer is 11. I have a lettuce head, a head of broccoli, an onion, a stalk of celery, two carrots, a garlic, and a yam. How many vegetables do I have?\nA: Let's think step by step.\nWe first identify the vegetables on the list and include their quantity in parentheses:\n- lettuce (1)\n- broccoli (1)\n- onion (1)\n- celery (1)\n- carrots (2)\n- garlic (1)\n- yam (1)\nNow, let's add the numbers in parentheses: 1 + 1 + 1 + 1 + 2 + 1 + 1 = 8. So the answer is 8.Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml index 439c5f45..5b7228f4 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/penguins_in_a_table.yaml @@ -1,5 +1,5 @@ "dataset_name": "penguins_in_a_table" "description": "Answer questions about a table of penguins and their attributes.\n\n" "doc_to_text": " Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add a penguin to the table:\nJames, 12, 90, 12\nHow many penguins are less than 8 years old?\nOptions:\n(A) 1\n(B) 2\n(C) 3\n(D) 4\n(E) 5\nA: Let's think step by step.\nThis question focuses on age. We know the following: Louis is 7 years old, Bernard is 5 years old, Vincent is 9 years old, and Gwen is 8 years old.\nNow, we add James to this table: James is 12 years old.\nThe penguins that are less than 8 years old are Louis and Bernard.\nThere are 2 penguins less than 8 years old. So the answer is (B). Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is the youngest penguin?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: Let's think step by step.\nThis question focuses on age. We know the following: Louis is 7 years old, Bernard is 5 years old, Vincent is 9 years old, and Gwen is 8 years old.\nAccording to the table, Bernard (5) is the youngest amongst them.\nThe youngest penguin is Bernard. So the answer is (B). 
Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. What is the name of the second penguin sorted by alphabetic order?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: Let's think step by step.\nThis question focuses on the name. We know the following: The names of the penguin in the table are Louis, Bernard, Vincent, and Gwen.\nWhen we sort their names alphabetically, we get Bernard, Gwen, Louis, Vincent.\nThe name of the second penguin sorted by alphabetical order is Gwen.\nThe name of the second penguin sorted by alphabetic order is Gwen. So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml index a93951da..f4ee36e6 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/reasoning_about_colored_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "reasoning_about_colored_objects" "description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" "doc_to_text": " On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: Let's think step by step.\nAccording to this question, the color of the stress ball is blue. So the answer is (E). On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: Let's think step by step.\nAccording to this question, the objects are arranged in a row, from left to right, as follows: (1) a purple paperclip, (2) a pink stress ball, (3) a brown keychain, (4) a green scrunchiephone charger, (5) a mauve fidget spinner, (6) a burgundy pen.\nThe stress ball is the second object on the list, namely (2). The object that is to the right of the stress ball corresponds to (3), which is a brown keychain.\nThe color of the keychain is brown. So the answer is (F). On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. 
How many non-orange items do you see to the left of the teal item?\nOptions:\n(A) zero\n(B) one\n(C) two\n(D) three\n(E) four\n(F) five\n(G) six\nA: Let's think step by step.\nAccording to this question, the objects are arranged in a row, from left to right, as follows: (1) a teal plate, (2) a burgundy keychain, (3) a yellow scrunchiephone charger, (4) an orange mug, (5) a pink notebook, (6) a grey cup.\nThe teal plate is the first item, namely (1). There is no item to the left of the teal item.\nThe number of non-orange items to the left of the teal item is zero. So the answer is (A).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml index d6a3485b..3b3f7799 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/ruin_names.yaml @@ -1,5 +1,5 @@ "dataset_name": "ruin_names" "description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" "doc_to_text": " Which of the following is a humorous edit of this artist or movie name: 'whitesnake'?\nOptions:\n(A) whitesnape\n(B) whitesnapke\n(C) whitesnuake\n(D) mwhitesnake\nA: Let's think step by step.\nThe original name is \"whitesnake\". This is the name of an old English hard rock band. It is a compound word, formed by the words \"white\" and \"snake\".\n(A) \"whitesnape\": It is formed by the combination of \"white\" and \"snake\"; therefore, \"snake\" has been changed to \"snape\". Snape makes a reference to the fictional character Severus Snape in the Harry Potter series, so (A) is indeed a meaningful and funny edit.\n(B) \"whitesnapke\": It is formed by the combination of \"white\" and \"snapke\", but \"snapke\" is not an actual word; therefore, \"whitesnapke\" is not humorous.\n(C) \"whitesnuake\": It is formed by the combination of \"white\" and \"snuake\", but \"snuake\" is not an actual word; therefore, \"whitesnuake\" is not humorous.\n(D) \"mwhitesnake\": It is formed by the combination of \"m\", \"white\", and \"snake\", but the prefix \"-m \"seems arbitrary; therefore, \"mwhitesnake\" is not meaningful or humorous.\nAbove the above, the only humorous edit is (A). So the answer is (A). Which of the following is a humorous edit of this artist or movie name: 'one of our dinosaurs is missing'?\nOptions:\n(A) ofne of our dinosaurs is missing\n(B) one af our dinosaurs is missing\n(C) one of our dinosaurs is pissing\n(D) one of our dinosaur is missing\nA: Let's think step by step.\nThe original name is \"one of our dinosaurs is missing\". This is the name of an old British movie.\n(A) \"ofne of our dinosaurs is missing\": Here \"one of\" is changed to \"ofne\", but the word \"ofne\" is not an actual word.\n(B) \"one af our dinosaurs is missing\": Here the word \"of\" is changed to \"af\", but the word \"af\" is not an actual word.\n(C) \"one of our dinosaurs is pissing\": Here the word \"missing\" is changed to \"pissing\", and \"one of our dinosaurs is pissing\" is indeed a very whimsical and mischievous edit. 
This change truly ruins the original title of the movie.\n(D) \"one of our dinosaur is missing\": Here the word \"dinosaurs\" is changed to \"dinosaur\", but \"dinosaur\" is singular but should be plural in the title; this change therefore feels arbitrary and not humorous.\nAbove the above, the only humorous edit is (C).\nAbove the above, the only humorous edit is (C). So the answer is (C). Which of the following is a humorous edit of this artist or movie name: 'counting crows'?\nOptions:\n(A) countingy crows\n(B) counting cows\n(C) courting crows\n(D) coutnting crows\nA: Let's think step by step.\nThe original name is \"counting crows\". This is the name of an American rock band. Historically, the band name comes from the British nursery rhyme \"One for Sorrow\", which is about counting of magpies.\n(A) \"countingy crows\": Here the word \"counting\" is changed to \"countingy\", but the word \"countingy\" is not an actual word.\n(B) \"counting cows\": Here the word \"crows\" is changed to \"cows\", and this is indeed a playful and meaningful edit that ruins the original name of the band.\n(C) \"courting crows\": Here the word \"counting\" is changed to \"courting\", and \"courting\" is an actual word; however, \"courting crows\" does not sound as humorous as \"counting cows\".\n(D) \"coutnting crows\": Here the word \"counting\" is changed to \"coutnting\", but the word \"coutnting\" is not an actual word.\nAbove the above, the only humorous edit is (B). So the answer is (B).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml index 2aa42072..1d4c13a6 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/salient_translation_error_detection.yaml @@ -1,5 +1,5 @@ "dataset_name": "salient_translation_error_detection" "description": "Detect the type of error in an English translation of a German source sentence.\n\n" "doc_to_text": " The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. 
Source: In der Liste der Baudenkmale in Lenzen (Elbe) sind alle Baudenkmale der brandenburgischen Stadt Lenzen (Elbe) und ihrer Ortsteile aufgelistet.\nTranslation: In the list of architectural monuments in Lenzen all architectural monuments of the Brandenburg city of Lenzen and its districts are listed.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: Let's think step by step.\nWe solve this question by first translating the source sentence to English and then by comparing our translation with the provided translation. According to Google Translate, the correct translation of the source sentence from German to English is \"The list of monuments in Lenzen (Elbe) includes all the monuments in the Brandenburg town of Lenzen (Elbe) and its districts.\" On the other hand, the provided translation is \"In the list of architectural monuments in Lenzen all architectural monuments of the Brandenburg city of Lenzen and its districts are listed.\" Note that Lenzen (Elbe) is changed to Lenzen in the original translation; so, there is a named entity error. Because an entity in the original source sentence is changed to a different entity in the translation, the translation contains an error pertaining to Named Entities. So the answer is (D). The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Auf dieser Seite sind die Baudenkmäler der oberbayerischen Großen Kreisstadt Landsberg am Lech zusammengestellt.\nTranslation: On this page are compiled the architectural monuments of the town of Landsberg am Lech.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: Let's think step by step.\nWe solve this question by first translating the source sentence to English and then by comparing our translation with the provided translation. According to Google Translate, the correct translation of the source sentence from German to English is \"The monuments of the Upper Bavarian district town of Landsberg am Lech are compiled on this page.\" On the other hand, the provided translation is \"On this page are compiled the architectural monuments of the town of Landsberg am Lech.\" Note that an important detail about the location of Landsberg am Lech is omitted in the original translation: The translation should have said \"Upper Bavarian district town of Landsberg am Lech\". Because a significant clause in the translation was removed, the translation contains an error pertaining to Dropped Content. So the answer is (E). The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) 
is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Łeba ist eine Kleinstadt und ein Badeort im Powiat Lęborski der polnischen Woiwodschaft Pommern.\nTranslation: Eba is not a small town and seaside resort in the Powiat Léborski county of the Pomeranian Voivodeship of Poland.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: Let's think step by step.\nWe solve this question by first translating the source sentence to English and then by comparing our translation with the provided translation. According to Google Translate, the correct translation of the source sentence from German to English is \"Łeba is a small town and seaside resort in the Powiat Lęborski of the Polish Pomeranian Voivodeship.\" On the other hand, the provided translation is \"Łeba is not a small town and seaside resort in the Powiat Léborski county of the Pomeranian Voivodeship of Poland.\" Note that the provided sentence says, \"Łeba is not a small town ...\" However, the translation should have been \"Łeba is a small town ...\" Because a negation is introduced at the beginning of the sentence and has fundamentally changed the meaning of the original source, the translation contains an error pertaining to Negation or Antonyms. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml index 0138faa7..060ff768 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/snarks.yaml @@ -1,5 +1,5 @@ "dataset_name": "snarks" "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" "doc_to_text": " Which statement is sarcastic?\nOptions:\n(A) Yes, because having interests and actively researching them is a huge waste\n(B) Yes, because having interests and actively researching them is a huge deal\nA: Let's think step by step.\nIf we look at (A), it says that having interests and actively researching them is a huge waste, implying that it is a useless effort. However, we know that having interests and actively researching them is typically not a waste but rather is beneficial to the individual. The presence of such a juxtaposition in (A) suggests that it contains a taste of irony and sarcasm.\nIf we look at (B), it says that having interests and actively researching them is a huge deal, implying that it is an important and consequential effort. 
This is arguably a neutral and correct statement.\nAbove the above, the sarcastic option is (A). So the answer is (A). Which statement is sarcastic?\nOptions:\n(A) No one is going to disagree with you on this. Avoiding ad hominem attacks really help your case\n(B) No one is going to disagree with you on this. Ad hominem attacks really help your case\nA: Let's think step by step.\nIf we look at (A), it says that avoiding ad hominem attacks really help your case, implying that ad hominem attacks are adverse and injurious. Because ad hominem attacks are adressed at a person rather than an idea, it is indeed true that avoiding them is often useful and helpful; so, (A) is a neutral (valid and agreeable) statement.\nIf we look at (B), it says that ad hominem attacks really help your case, implying that ad hominem attacks are a positive thing. However, we stated previously that ad hominem attacks are often not useful or constructive. The speaker in this sentence therefore seems to mean the opposite of what they are saying; so, there appears to have a taste of irony and sarcasm in (B).\nAbove the above, the sarcastic option is (B). So the answer is (B). Which statement is sarcastic?\nOptions:\n(A) Consistency in the league's punishments? What do you think this is supposed to be, politics?\n(B) Consistency in the league's punishments? What do you think this is supposed to be, moral?\nA: Let's think step by step.\nIf we look at (A), it likens the consistency in the league's punishments with that in politics. Because politics or political affairs are often not considered to be consistent or dependable, this sentence appears to be satirical.\nIf we look at (B), it likens the consistency in the league's punishments with that in morality. Discussing the consistency of the league's punishments in the context of morality, ethics, or law makes sense and does not appear to make a satirical point about anything.\nAbove the above, the sarcastic option is (A). So the answer is (A).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml index 90c0f191..6f561df9 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/sports_understanding.yaml @@ -1,5 +1,5 @@ "dataset_name": "sports_understanding" "description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" "doc_to_text": " Is the following sentence plausible? \"Bam Adebayo scored a reverse layup in the Western Conference Finals.\"\nA: Let's think step by step. Bam Adebayo is an American basketball player. Scoring a reverse layup in the Western Conference Finals is part of the NBA Finals. So the answer is yes. Is the following sentence plausible? \"Santi Cazorla scored a touchdown.\"\nA: Let's think step by step. Santi Cazorla is a soccer player. Touchdown is part of American football and rugby. So the answer is no. Is the following sentence plausible? \"DeMar DeRozan was called for the goal tend.\"\nA: Let's think step by step. DeMar DeRozan is an American basketball player. Goal tending is part of basketball. 
So the answer is yes.Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml index cff56746..2c55788f 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/temporal_sequences.yaml @@ -1,5 +1,5 @@ "dataset_name": "temporal_sequences" "description": "Task description: Answer questions about which times certain events could have occurred.\n\n" "doc_to_text": " Today, Emily went to the museum. Between what times could they have gone?\nWe know that:\nEmily woke up at 1pm.\nElizabeth saw Emily reading at the library from 2pm to 4pm.\nJessica saw Emily watching a movie at the theater from 4pm to 5pm.\nLeslie saw Emily waiting at the airport from 5pm to 6pm.\nWilliam saw Emily buying clothes at the mall from 6pm to 7pm.\nThe museum was closed after 7pm.\nBetween what times could Emily have gone to the museum?\nOptions:\n(A) 1pm to 2pm\n(B) 6pm to 7pm\n(C) 5pm to 6pm\n(D) 2pm to 4pm\nA: Let's think step by step.\nWake-up time: 1pm.\n1pm-2pm: free.\n2pm-4pm: reading at the library.\n4pm-5pm: watching a movie at the theater.\n5pm-6pm: waiting at the airport.\n6pm-7pm: buying clothes at the mall.\nThe museum closure time: 7pm.\nThe only time when Emily could have gone to the museum was 1pm to 2pm. So the answer is (A). Today, Elizabeth went to the amusement park. Between what times could they have gone?\nWe know that:\nElizabeth woke up at 7am.\nDavid saw Elizabeth fixing their computer at the electronic store from 1pm to 2pm.\nSarah saw Elizabeth playing tennis at the tennis court from 2pm to 3pm.\nSusan saw Elizabeth walking towards the Statue of Liberty from 3pm to 6pm.\nAndrew saw Elizabeth taking photos near the Eiffel Tower from 6pm to 9pm.\nEmily saw Elizabeth getting a coffee at the cafe from 9pm to 10pm.\nThe amusement park was closed after 10pm.\nBetween what times could Elizabeth have gone to the amusement park?\nOptions:\n(A) 7am to 1pm\n(B) 9pm to 10pm\n(C) 1pm to 2pm\n(D) 3pm to 6pm\nA: Let's think step by step.\nWake-up time: 7am.\n7am-1pm: free.\n1pm-2pm: fixing their computer at the electronic store.\n2pm-3pm: playing tennis at the tennis court.\n3pm-6pm: walking towards the Statue of Liberty.\n6pm-9pm: taking photos near the Eiffel Tower.\n9pm-10pm: getting a coffee at the cafe.\nThe amusement park closure time: 10pm.\nThe only time when Elizabeth could have gone to the amusement park was 7am to 1pm. So the answer is (A). Today, Tiffany went to the beach. 
Between what times could they have gone?\nWe know that:\nTiffany woke up at 5am.\nBetty saw Tiffany getting a coffee at the cafe from 5am to 6am.\nJessica saw Tiffany working at the office from 6am to 9am.\nJohn saw Tiffany stretching at a yoga studio from 9am to 12pm.\nSean saw Tiffany sitting on a rooftop from 12pm to 2pm.\nSarah saw Tiffany playing tennis at the tennis court from 2pm to 3pm.\nThe beach was closed after 4pm.\nBetween what times could Tiffany have gone to the beach?\nOptions:\n(A) 9am to 12pm\n(B) 12pm to 2pm\n(C) 5am to 6am\n(D) 3pm to 4pm\nA: Let's think step by step.\nWake-up time: 5am.\n5am-6am: getting a coffee at the cafe.\n6am-9am: working at the office.\n9am-12pm: stretching at a yoga studio.\n12pm-2pm: sitting on a rooftop.\n2pm-3pm: playing tennis at the tennis court.\n3pm-4pm: free.\nThe beach closure time: 4pm.\nThe only time when Tiffany could have gone to the beach was 3pm to 4pm. So the answer is (D).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml index a4fd3995..415ad429 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_five_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_five_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": " Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A). Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C). Alice, Bob, and Claire are dancers at a square dance. 
At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_fewshot_template_yaml" "task": "bbh_flan_cot_fewshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml index 28ff5389..fbcffccf 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_seven_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_seven_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": " Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A). Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C). Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. 
Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_tracking_shuffled_objects_seven_objects"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml
index 84526656..320b31f2 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/tracking_shuffled_objects_three_objects.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "tracking_shuffled_objects_three_objects"
 "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
 "doc_to_text": " Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A). Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C). Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_tracking_shuffled_objects_three_objects"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml
index 69857561..755ce9f9 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/web_of_lies.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "web_of_lies"
 "description": "Evaluate a random boolean function expressed as a word problem.\n\n"
 "doc_to_text": " Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent lies. Does Raymond tell the truth?\nA: Let's think step by step.\n(1) Fidel tells the truth. So, we know that Fidel tells the truth.\n(2) Jerry says Fidel tells the truth. Since we know from (1) that Fidel tells the truth, if Jerry says that Fidel tells the truth, then Jerry tells the truth.\n(3) Vina says Jerry tells the truth. Since we know from (2) that Jerry tells the truth, if Vina says Jerry tells the truth, then Vine tells the truth.\n(4) Millicent says Vina lies. Since we know from (3) that Vina tells the truth, if Millicent says Vina lies, then Millicent lies.\n(5) Raymond says Millicent lies. Since we know from (4) that Millicent lies, if Raymond says Millicent lies, then Raymond tells the truth.\nNow, the question asks: Does Raymond tell the truth? We know from (5) that Raymond tells the truth. So the answer is Yes. Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell the truth?\nA: Let's think step by step.\n(1) Kristian lies. So, we know that Kristian lies.\n(2) Millie says Kristian lies. Since we know from (1) that Kristian lies, if Millie says Kristian lies, then Millie tells the truth.\n(3) Maybelle says Millie tells the truth. Since we know from (2) that Millie tells the truth, if Maybelle says Millie tells the truth, then Maybelle tells the truth.\n(4) Fidel says Maybelle lies. Since we know from (3) that Maybelle tells the truth, if Fidel says Maybelle lies, then Fidel lies.\n(5) Leda says Fidel lies. Since we know from (4) that Fidel lies, if Leda says Fidel lies, then Leda tells the truth.\nNow, the question asks: Does Leda tell the truth? We know from (5) that Leda tells the truth. So the answer is Yes. Question: Kristian tells the truth. Michaela says Kristian lies. Raymond says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says Osvaldo tells the truth. Does Jamey tell the truth?\nA: Let's think step by step.\n(1) Kristian tells the truth. So, we know that Kristian tells the truth.\n(2) Michaela says Kristian lies. Since we know from (1) that Kristian tells the truth, if Michaela says Kristian lies, then Michaela lies.\n(3) Raymond says Michaela tells the truth. Since we know from (2) that Michaela lies, if Raymond says Michaela tells the truth, then Raymond lies.\n(4) Osvaldo says Raymond tells the truth. Since we know from (3) that Raymond lies, if Osvaldo says Raymond tells the truth, then Osvaldo lies.\n(5) Jamey says Osvaldo tells the truth. Since we know from (4) that Osvaldo lies, if Jamey says Osvaldo tells the truth, then Jamey lies.\nNow, the question asks: Does Jamey tell the truth? We know from (5) that Jamey lies. So the answer is No.Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_web_of_lies"
diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml
index 0e2e1a88..d6b9f1bc 100644
--- a/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_fewshot/word_sorting.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "word_sorting"
 "description": "Sort a list of words.\n\n"
 "doc_to_text": " Sort the following words alphabetically: List: oven costume counterpart\nA: Let's think step by step.\nThe first letter: \"oven\": \"o\" (15). \"costume\": \"c\" (3). \"counterpart\": \"c\" (3). We now have: (3) [\"costume\" ? \"counterpart\"] < (15) \"oven\". Now let's sort this subpart [\"costume\" ? \"counterpart\"] by looking at their second letters.\nThe second letter: \"costume\": \"o\" (15). \"counterpart\": \"o\" (15). We now have: (15) [\"costume\" ? \"counterpart\"]. Now let's sort this subpart [\"costume\" ? \"counterpart\"] by looking at their third letters.\nThe third letter: \"costume\": \"s\" (19). \"counterpart\": \"u\" (21). We now have: (19) \"costume\" < (21) \"counterpart\". Hence, we have [\"costume\" < \"counterpart\"] < \"oven\". So the answer is costume counterpart oven. Sort the following words alphabetically: List: hypochlorite ponderosa phone credulity\nA: Let's think step by step.\nThe first letter: \"hypochlorite\": \"h\" (8). \"ponderosa\": \"p\" (16). \"phone\": \"p\" (16). \"credulity\": \"c\" (3). We now have: (3) \"credulity\" < (8) \"hypochlorite\" < (16) [\"ponderosa\" ? \"phone\"]. Now let's sort this subpart [\"ponderosa\" ? \"phone\"] by looking at their second letters.\nThe second letter: \"ponderosa\": \"o\" (15). \"phone\": \"h\" (8). We now have: (8) \"phone\" < (15) \"ponderosa\". Hence, we have \"credulity\" < \"hypochlorite\" < [\"phone\" <\"ponderosa\"]. So the answer is credulity hypochlorite phone ponderosa. Sort the following words alphabetically: List: newt arson parthia seismography mugho aspect census\nA: Let's think step by step.\nThe first letter: \"newt\": \"n\" (14). \"arson\": \"a\" (1). \"parthia\": \"p\" (16). \"seismography\": \"s\" (19). \"mugho\": \"m\" (13). \"aspect\": \"a\" (1). \"census\": \"c\" (3). We now have: (1) [\"arson\" ? \"aspect\"] < (3) \"census\" < (13) \"mugho\" < (14) \"newt\" < (16) \"parthia\" < (19) \"seismography\". Now let's sort this subpart [\"arson\" ? \"aspect\"] by looking at their second letters.\nThe second letter: \"arson\": \"r\" (18). \"aspect\": \"s\" (19). We now have: (18) \"arson\" < (19) \"aspect\". Hence, we have [\"arson\" < \"aspect\"] < \"census\" < \"mugho\" < \"newt\" < \"parthia\" < \"seismography\". So the answer is arson aspect census mugho newt parthia seismography.Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_fewshot_template_yaml"
 "task": "bbh_flan_cot_fewshot_word_sorting"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml
index 04a6d1e5..7ac62ea6 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/boolean_expressions.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "boolean_expressions"
 "description": "Evaluate the result of a random Boolean expression.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_boolean_expressions"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml
index 73ed31c8..39fc39b1 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/causal_judgement.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "causal_judgement"
 "description": "Answer questions about causal attribution.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_causal_judgement"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml
index 11ad75db..94eb0f2c 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/date_understanding.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "date_understanding"
 "description": "Infer the date from context.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_date_understanding"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml
index b8b13d78..cb6946db 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/disambiguation_qa.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "disambiguation_qa"
 "description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_disambiguation_qa"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml
index 5995b6c4..c09dd0e8 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/dyck_languages.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "dyck_languages"
 "description": "Correctly close a Dyck-n word.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_dyck_languages"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml
index 6b029e7e..28ca9fb3 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/formal_fallacies.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "formal_fallacies"
 "description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_formal_fallacies"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml
index acb91aa4..6de27bce 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/geometric_shapes.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "geometric_shapes"
 "description": "Name geometric shapes from their SVG paths.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_geometric_shapes"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml
index dbe1280b..3fd6d246 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/hyperbaton.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "hyperbaton"
 "description": "Order adjectives correctly in English sentences.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_hyperbaton"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml
index 5592252a..6ab7c57e 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_five_objects.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "logical_deduction_five_objects"
 "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_logical_deduction_five_objects"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml
index c85b9d21..5c7e22df 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_seven_objects.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "logical_deduction_seven_objects"
 "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
 "task": "bbh_flan_cot_zeroshot_logical_deduction_seven_objects"
diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml
index e94f8e4a..6337dfd1 100644
--- a/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml
+++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/logical_deduction_three_objects.yaml
@@ -1,5 +1,5 @@
 "dataset_name": "logical_deduction_three_objects"
 "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n"
-"include": "_template_yaml"
+"include": "_flan_cot_zeroshot_template_yaml"
"task": "bbh_flan_cot_zeroshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml index 038119fc..0bcb95c9 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/movie_recommendation.yaml @@ -1,5 +1,5 @@ "dataset_name": "movie_recommendation" "description": "Recommend movies similar to the given list of movies.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml index c21c1b8c..7cb710ea 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/multistep_arithmetic_two.yaml @@ -1,5 +1,5 @@ "dataset_name": "multistep_arithmetic_two" "description": "Solve multi-step arithmetic problems.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml index c8ea0681..d534c729 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/navigate.yaml @@ -1,5 +1,5 @@ "dataset_name": "navigate" "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml index 6d6a4721..3457954a 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/object_counting.yaml @@ -1,5 +1,5 @@ "dataset_name": "object_counting" "description": "Questions that involve enumerating objects and asking the model to count them.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml index c5501700..fa03b5b7 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/penguins_in_a_table.yaml @@ -1,5 +1,5 @@ "dataset_name": "penguins_in_a_table" "description": "Answer questions about a table of penguins and their attributes.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml index 26789385..7cc6d7f6 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/reasoning_about_colored_objects.yaml @@ -1,5 
+1,5 @@ "dataset_name": "reasoning_about_colored_objects" "description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml index 3289b750..17d4c32d 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/ruin_names.yaml @@ -1,5 +1,5 @@ "dataset_name": "ruin_names" "description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml index c8113e62..58f6b0c2 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/salient_translation_error_detection.yaml @@ -1,5 +1,5 @@ "dataset_name": "salient_translation_error_detection" "description": "Detect the type of error in an English translation of a German source sentence.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml index b9da41c7..b31a8f94 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/snarks.yaml @@ -1,5 +1,5 @@ "dataset_name": "snarks" "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml index dbf21164..0a44ba92 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/sports_understanding.yaml @@ -1,5 +1,5 @@ "dataset_name": "sports_understanding" "description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml index 84db7993..573f5e43 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/temporal_sequences.yaml @@ -1,5 +1,5 @@ "dataset_name": 
"temporal_sequences" "description": "Task description: Answer questions about which times certain events could have occurred.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml index 4b6ec1ad..e1b7d1c2 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_five_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_five_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml index 99dbcc33..17e5dcbc 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_seven_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml index 4f9ff8e7..8c3d7c4d 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/tracking_shuffled_objects_three_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_three_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml index 5304cdfc..adec5f3c 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/web_of_lies.yaml @@ -1,5 +1,5 @@ "dataset_name": "web_of_lies" "description": "Evaluate a random boolean function expressed as a word problem.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml index 62f0a6aa..50276209 100644 --- 
a/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/word_sorting.yaml @@ -1,5 +1,5 @@ "dataset_name": "word_sorting" "description": "Sort a list of words.\n\n" "doc_to_text": "Q: {{input}}\nA: Let's think step by step.\n" -"include": "_template_yaml" +"include": "_flan_cot_zeroshot_template_yaml" "task": "bbh_flan_cot_zeroshot_word_sorting" diff --git a/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml index 19d24f3f..35436bd7 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/boolean_expressions.yaml @@ -1,5 +1,5 @@ "dataset_name": "boolean_expressions" "description": "Evaluate the result of a random Boolean expression.\n\n" "doc_to_text": "Q: not ( ( not not True ) ) is\nA: False\n\nQ: True and False and not True and True is\nA: False\n\nQ: not not ( not ( False ) ) is\nA: True\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_boolean_expressions" diff --git a/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml index b9dd8f6e..5857204c 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/causal_judgement.yaml @@ -1,5 +1,5 @@ "dataset_name": "causal_judgement" "description": "Answer questions about causal attribution.\n\n" "doc_to_text": "Q: How would a typical person answer each of the following questions about causation?\nFrank T., had an ongoing dispute with his neighbor over a stretch of land and one day decided to shoot his neighbor in the body. Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild. Nonetheless, the bullet bounced off a large boulder several feet away and hit the neighbor's body, causing significant injury. Did Frank T. intentionally shoot his neighbor in the body?\nOptions:\n- Yes\n- No\nA: No\n\nQ: How would a typical person answer each of the following questions about causation?\nSuzy and Billy are working on a project that is very important for our nation's security. The boss tells them both: \"Be sure that you are here at exactly 9 am. It is absolutely essential that you arrive at that time.\" Both Billy and Suzy arrive at 9 am. As it happens, there was a motion detector installed in the room where they arrived. The motion detector was set up to be triggered if at least one person appeared in the room at the same time. So the motion detector went off. Did Billy cause the motion detector to go off?\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: How would a typical person answer each of the following questions about causation?\nGeorge and his sister Lena reunite at their parents' house for Thanksgiving. Whereas George just got into medical school, Lena is unhappy in her marriage and recently lost her job. Over the course of the day, George and Lena get into a number of heated arguments. Later in the afternoon they play a game of darts. They split the first two games, and the third game is close until the end. Who will win comes down to George's last shot. If he hits a high point region, he wins; if he hits a low point region, Lena wins. George thinks of the difficult time Lena is having, and he really wants to let her win. He aims the dart at the low point region. He sets up his shot and the dart lands in the low point region. After his shot, Lena wins the game and is very happy. 
Did George hit the low point region intentionally?\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_causal_judgement" diff --git a/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml index 5ed01c22..8b0937cf 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/date_understanding.yaml @@ -1,5 +1,5 @@ "dataset_name": "date_understanding" "description": "Infer the date from context.\n\n" "doc_to_text": "Q: Today is Christmas Eve of 1937. What is the date 10 days ago in MM/DD/YYYY?\nOptions:\n(A) 12/14/2026\n(B) 12/14/1950\n(C) 12/14/2007\n(D) 12/14/1937\n(E) 07/14/1938\n(F) 12/14/1988\nA: (D)\n\nQ: Tomorrow is 11/12/2019. What is the date one year ago from today in MM/DD/YYYY?\nOptions:\n(A) 09/04/2018\n(B) 11/11/2018\n(C) 08/25/2018\n(D) 11/02/2018\n(E) 11/04/2018\nA: (B)\n\nQ: Jane and John married on Jan 2, 1958. It is their 5-year anniversary today. What is the date tomorrow in MM/DD/YYYY?\nOptions:\n(A) 01/11/1961\n(B) 01/03/1963\n(C) 01/18/1961\n(D) 10/14/1960\n(E) 01/03/1982\n(F) 12/03/1960\nA: (B)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_date_understanding" diff --git a/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml index 0c04056f..bfa3f5d1 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/disambiguation_qa.yaml @@ -1,5 +1,5 @@ "dataset_name": "disambiguation_qa" "description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n" "doc_to_text": "Q: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The chief told the counselor that they took the day off.\nOptions:\n(A) The chief took the day off\n(B) The counselor took the day off\n(C) Ambiguous\nA: (A)\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The manager sent a message to the secretary, but he didn't reply yet.\nOptions:\n(A) The secretary didn't reply yet\n(B) The manager didn't reply yet\n(C) Ambiguous\nA: (A)\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: Bailey will plan to meet the director at his office\nOptions:\n(A) It will be Bailey's office\n(B) It will be the director's office\n(C) Ambiguous\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_disambiguation_qa" diff --git a/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml index 84e308cf..2bfca6d2 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/dyck_languages.yaml @@ -1,5 +1,5 @@ "dataset_name": "dyck_languages" "description": "Correctly close a Dyck-n word.\n\n" "doc_to_text": "Q: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: [ { [\nA: ] } ]\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. 
Input: < > ( ( [ [ ( { } ) [ < > ] ]\nA: ] ) )\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < > ] > }\nA: } ] >\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_dyck_languages" diff --git a/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml index c91769a5..a420a34c 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/formal_fallacies.yaml @@ -1,5 +1,5 @@ "dataset_name": "formal_fallacies" "description": "Distinguish deductively valid arguments from formal fallacies.\n\n" "doc_to_text": "Q: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: To begin with, Lesley is a close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy. It follows that Lesley is a great-grandfather of Leroy.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: valid\n\nQ: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: Whoever is not a great-grandfather of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde. We may conclude: Everyone who is an ancestor of Dana is a stepbrother of Brian, too.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: valid\n\nQ: \"It is not always easy to grasp who is consuming which products. The following argument pertains to this question: Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and, in the same time, a loyal buyer of Caress soap. 
It follows that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: invalid\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_formal_fallacies" diff --git a/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml index 1dbb242f..75a72a11 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/geometric_shapes.yaml @@ -1,5 +1,5 @@ "dataset_name": "geometric_shapes" "description": "Name geometric shapes from their SVG paths.\n\n" "doc_to_text": "Q: This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: (F)\n\nQ: This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: (G)\n\nQ: This SVG path element draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: (D)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_geometric_shapes" diff --git a/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml index 090865b2..d1007dd0 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/hyperbaton.yaml @@ -1,5 +1,5 @@ "dataset_name": "hyperbaton" "description": "Order adjectives correctly in English sentences.\n\n" "doc_to_text": "Q: Which sentence has the correct adjective order:\nOptions:\n(A) rubber terrible ship\n(B) terrible rubber ship\nA: (B)\n\nQ: Which sentence has the correct adjective order:\nOptions:\n(A) repulsive small Brazilian exercise ship\n(B) Brazilian repulsive exercise small ship\nA: (A)\n\nQ: Which sentence has the correct adjective order:\nOptions:\n(A) blue gold wonderful square shoe\n(B) wonderful square blue gold shoe\nA: (B)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_hyperbaton" diff --git a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml index 67f2c1a7..68318d84 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_five_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_five_objects" "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" "doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. 
The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_logical_deduction_five_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml index 47593a0d..e6d2a205 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_seven_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_seven_objects" "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" "doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml index 7264e653..ebed792e 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/logical_deduction_three_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_three_objects" "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" "doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. 
Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml index 8fb208b0..9db0e3e6 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/movie_recommendation.yaml @@ -1,5 +1,5 @@ "dataset_name": "movie_recommendation" "description": "Recommend movies similar to the given list of movies.\n\n" "doc_to_text": "Q: Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big Lebowski:\nOptions:\n(A) Tetsuo\n(B) the Ironman\n(C) The Princess Bride\n(D) The Barkley Marathons The Race That Eats Its Young\n(E) Bug\nA: (C)\n\nQ: Find a movie similar to Twister, The Silence of the Lambs, Independence Day, Braveheart:\nOptions:\n(A) They Shoot Horses\n(B) Don't They\n(C) Forrest Gump\n(D) The Salton Sea\n(E) Extreme Days\nA: (C)\n\nQ: Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest Gump:\nOptions:\n(A) Phenomena\n(B) Lilting\n(C) Catwoman\n(D) Edge of Tomorrow\nA: (D)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml index ba5f65ca..36c3adbd 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/multistep_arithmetic_two.yaml @@ -1,5 +1,5 @@ "dataset_name": "multistep_arithmetic_two" "description": "Solve multi-step arithmetic problems.\n\n" "doc_to_text": "Q: ((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5)) =\nA: 123\n\nQ: ((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4)) =\nA: 3929\n\nQ: ((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9)) =\nA: -219\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml b/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml index 0aba5820..e896d245 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/navigate.yaml @@ -1,5 +1,5 @@ "dataset_name": "navigate" "description": "Given a series of navigation instructions, determine whether one would end up back at the 
starting point.\n\n" "doc_to_text": "Q: If you follow these instructions, do you return to the starting point? Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps. Take 8 steps.\nOptions:\n- Yes\n- No\nA: No\n\nQ: If you follow these instructions, do you return to the starting point? Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps. Take 1 step.\nOptions:\n- Yes\n- No\nA: No\n\nQ: If you follow these instructions, do you return to the starting point? Always face forward. Take 2 steps right. Take 9 steps left. Take 7 steps right.\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml index 7aa27a38..ddfc07e0 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/object_counting.yaml @@ -1,5 +1,5 @@ "dataset_name": "object_counting" "description": "Questions that involve enumerating objects and asking the model to count them.\n\n" "doc_to_text": "Q: I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana, a flute, an orange, and a violin. How many fruits do I have?\nA: 6\n\nQ: I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape, a nectarine, and three plums. How many fruits do I have?\nA: 11\n\nQ: I have a lettuce head, a head of broccoli, an onion, a stalk of celery, two carrots, a garlic, and a yam. How many vegetables do I have?\nA: 8\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml index f91d9c98..7c7087d1 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/penguins_in_a_table.yaml @@ -1,5 +1,5 @@ "dataset_name": "penguins_in_a_table" "description": "Answer questions about a table of penguins and their attributes.\n\n" "doc_to_text": "Q: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add a penguin to the table:\nJames, 12, 90, 12\nHow many penguins are less than 8 years old?\nOptions:\n(A) 1\n(B) 2\n(C) 3\n(D) 4\n(E) 5\nA: (B)\n\nQ: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is the youngest penguin?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: (B)\n\nQ: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. 
What is the name of the second penguin sorted by alphabetic order?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: (D)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml index d03dcd07..02422c1b 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/reasoning_about_colored_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "reasoning_about_colored_objects" "description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" "doc_to_text": "Q: On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: (E)\n\nQ: On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: (F)\n\nQ: On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. 
How many non-orange items do you see to the left of the teal item?\nOptions:\n(A) zero\n(B) one\n(C) two\n(D) three\n(E) four\n(F) five\n(G) six\nA: (A)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml index d12013db..b9c9b7b5 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/ruin_names.yaml @@ -1,5 +1,5 @@ "dataset_name": "ruin_names" "description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" "doc_to_text": "Q: Which of the following is a humorous edit of this artist or movie name: 'whitesnake'?\nOptions:\n(A) whitesnape\n(B) whitesnapke\n(C) whitesnuake\n(D) mwhitesnake\nA: (A)\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'one of our dinosaurs is missing'?\nOptions:\n(A) ofne of our dinosaurs is missing\n(B) one af our dinosaurs is missing\n(C) one of our dinosaurs is pissing\n(D) one of our dinosaur is missing\nA: (C)\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'counting crows'?\nOptions:\n(A) countingy crows\n(B) counting cows\n(C) courting crows\n(D) coutnting crows\nA: (B)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml index bfbcfa35..81be1ce6 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/salient_translation_error_detection.yaml @@ -1,5 +1,5 @@ "dataset_name": "salient_translation_error_detection" "description": "Detect the type of error in an English translation of a German source sentence.\n\n" "doc_to_text": "Q: The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: In der Liste der Baudenkmale in Lenzen (Elbe) sind alle Baudenkmale der brandenburgischen Stadt Lenzen (Elbe) und ihrer Ortsteile aufgelistet.\nTranslation: In the list of architectural monuments in Lenzen all architectural monuments of the Brandenburg city of Lenzen and its districts are listed.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: (D)\n\nQ: The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. 
Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Auf dieser Seite sind die Baudenkmäler der oberbayerischen Großen Kreisstadt Landsberg am Lech zusammengestellt.\nTranslation: On this page are compiled the architectural monuments of the town of Landsberg am Lech.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: (E)\n\nQ: The following translations from German to English contain a particular error. That error will be one of the following types: Named Entities: An entity (names, places, locations, etc.) is changed to a different entity. Numerical Values: Numerical values (ordinals or cardinals), dates, and/or units are changed. Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are changed. Negation or Antonyms: Introduce or remove a negation or change comparatives to their antonyms. Facts: Trivial factual errors not pertaining to the above classes are introduced in the translations. Dropped Content: A significant clause in the translation is removed. Please identify that error. Source: Łeba ist eine Kleinstadt und ein Badeort im Powiat Lęborski der polnischen Woiwodschaft Pommern.\nTranslation: Eba is not a small town and seaside resort in the Powiat Léborski county of the Pomeranian Voivodeship of Poland.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml b/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml index 375da1a2..803ead1a 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/snarks.yaml @@ -1,5 +1,5 @@ "dataset_name": "snarks" "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" "doc_to_text": "Q: Which statement is sarcastic?\nOptions:\n(A) Yes, because having interests and actively researching them is a huge waste\n(B) Yes, because having interests and actively researching them is a huge deal\nA: (A)\n\nQ: Which statement is sarcastic?\nOptions:\n(A) No one is going to disagree with you on this. Avoiding ad hominem attacks really help your case\n(B) No one is going to disagree with you on this. Ad hominem attacks really help your case\nA: (B)\n\nQ: Which statement is sarcastic?\nOptions:\n(A) Consistency in the league's punishments? What do you think this is supposed to be, politics?\n(B) Consistency in the league's punishments? 
What do you think this is supposed to be, moral?\nA: (A)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml index f72cdba9..25a321f8 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/sports_understanding.yaml @@ -1,5 +1,5 @@ "dataset_name": "sports_understanding" "description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" "doc_to_text": "Q: Is the following sentence plausible? \"Bam Adebayo scored a reverse layup in the Western Conference Finals.\"\nA: yes\n\nQ: Is the following sentence plausible? \"Santi Cazorla scored a touchdown.\"\nA: no\n\nQ: Is the following sentence plausible? \"DeMar DeRozan was called for the goal tend.\"\nA: yes\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml index 0f5e5380..2cf283fe 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/temporal_sequences.yaml @@ -1,5 +1,5 @@ "dataset_name": "temporal_sequences" "description": "Task description: Answer questions about which times certain events could have occurred.\n\n" "doc_to_text": "Q: Today, Emily went to the museum. Between what times could they have gone?\nWe know that:\nEmily woke up at 1pm.\nElizabeth saw Emily reading at the library from 2pm to 4pm.\nJessica saw Emily watching a movie at the theater from 4pm to 5pm.\nLeslie saw Emily waiting at the airport from 5pm to 6pm.\nWilliam saw Emily buying clothes at the mall from 6pm to 7pm.\nThe museum was closed after 7pm.\nBetween what times could Emily have gone to the museum?\nOptions:\n(A) 1pm to 2pm\n(B) 6pm to 7pm\n(C) 5pm to 6pm\n(D) 2pm to 4pm\nA: (A)\n\nQ: Today, Elizabeth went to the amusement park. Between what times could they have gone?\nWe know that:\nElizabeth woke up at 7am.\nDavid saw Elizabeth fixing their computer at the electronic store from 1pm to 2pm.\nSarah saw Elizabeth playing tennis at the tennis court from 2pm to 3pm.\nSusan saw Elizabeth walking towards the Statue of Liberty from 3pm to 6pm.\nAndrew saw Elizabeth taking photos near the Eiffel Tower from 6pm to 9pm.\nEmily saw Elizabeth getting a coffee at the cafe from 9pm to 10pm.\nThe amusement park was closed after 10pm.\nBetween what times could Elizabeth have gone to the amusement park?\nOptions:\n(A) 7am to 1pm\n(B) 9pm to 10pm\n(C) 1pm to 2pm\n(D) 3pm to 6pm\nA: (A)\n\nQ: Today, Tiffany went to the beach. 
Between what times could they have gone?\nWe know that:\nTiffany woke up at 5am.\nBetty saw Tiffany getting a coffee at the cafe from 5am to 6am.\nJessica saw Tiffany working at the office from 6am to 9am.\nJohn saw Tiffany stretching at a yoga studio from 9am to 12pm.\nSean saw Tiffany sitting on a rooftop from 12pm to 2pm.\nSarah saw Tiffany playing tennis at the tennis court from 2pm to 3pm.\nThe beach was closed after 4pm.\nBetween what times could Tiffany have gone to the beach?\nOptions:\n(A) 9am to 12pm\n(B) 12pm to 2pm\n(C) 5am to 6am\n(D) 3pm to 4pm\nA: (D)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml index 112ede19..f8b085dc 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_five_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_five_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: (A)\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: (C)\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml index 83821c54..3c2f3ca4 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_seven_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_seven_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: Alice, Bob, and Claire are playing a game. 
At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: (A)\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: (C)\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml index afee9bb9..ef406244 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/tracking_shuffled_objects_three_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_three_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: (A)\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: (C)\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. 
At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: (C)\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml index 2f1c5686..23427364 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/web_of_lies.yaml @@ -1,5 +1,5 @@ "dataset_name": "web_of_lies" "description": "Evaluate a random boolean function expressed as a word problem.\n\n" "doc_to_text": "Q: Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent lies. Does Raymond tell the truth?\nA: Yes\n\nQ: Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell the truth?\nA: Yes\n\nQ: Question: Kristian tells the truth. Michaela says Kristian lies. Raymond says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says Osvaldo tells the truth. Does Jamey tell the truth?\nA: No\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml index 43b7d43d..9ef1a093 100644 --- a/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml +++ b/lm_eval/tasks/bbh/flan_fewshot/word_sorting.yaml @@ -1,5 +1,5 @@ "dataset_name": "word_sorting" "description": "Sort a list of words.\n\n" "doc_to_text": "Q: Sort the following words alphabetically: List: oven costume counterpart\nA: costume counterpart oven\n\nQ: Sort the following words alphabetically: List: hypochlorite ponderosa phone credulity\nA: credulity hypochlorite phone ponderosa\n\nQ: Sort the following words alphabetically: List: newt arson parthia seismography mugho aspect census\nA: arson aspect census mugho newt parthia seismography\n\nQ: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_fewshot_template_yaml" "task": "bbh_flan_fewshot_word_sorting" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml b/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml index 7098c7e3..5cfef9fe 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/boolean_expressions.yaml @@ -1,5 +1,5 @@ "dataset_name": "boolean_expressions" "description": "Evaluate the result of a random Boolean expression.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_boolean_expressions" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml b/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml index 953419bd..a09d701f 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/causal_judgement.yaml @@ -1,5 +1,5 @@ "dataset_name": "causal_judgement" "description": "Answer questions about causal attribution.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_causal_judgement" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml b/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml index 99255c90..77142f4a 100644 --- 
a/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/date_understanding.yaml @@ -1,5 +1,5 @@ "dataset_name": "date_understanding" "description": "Infer the date from context.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_date_understanding" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml b/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml index 65c515cc..8b19c2b2 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/disambiguation_qa.yaml @@ -1,5 +1,5 @@ "dataset_name": "disambiguation_qa" "description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_disambiguation_qa" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml b/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml index 10b87a70..6ca7a8a3 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/dyck_languages.yaml @@ -1,5 +1,5 @@ "dataset_name": "dyck_languages" "description": "Correctly close a Dyck-n word.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_dyck_languages" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml b/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml index 7fcf6920..b5622c57 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/formal_fallacies.yaml @@ -1,5 +1,5 @@ "dataset_name": "formal_fallacies" "description": "Distinguish deductively valid arguments from formal fallacies.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_formal_fallacies" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml b/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml index ee6082b9..abae12e6 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/geometric_shapes.yaml @@ -1,5 +1,5 @@ "dataset_name": "geometric_shapes" "description": "Name geometric shapes from their SVG paths.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_geometric_shapes" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml b/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml index 3e82c854..8cb072b8 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/hyperbaton.yaml @@ -1,5 +1,5 @@ "dataset_name": "hyperbaton" "description": "Order adjectives correctly in English sentences.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_hyperbaton" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml index 1e4adeb4..e4a4dc9e 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_five_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_five_objects" "description": "A logical deduction task which requires deducing the order of a sequence 
of objects.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_logical_deduction_five_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml index 910ca139..bb96b78c 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_seven_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_seven_objects" "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_logical_deduction_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml index 405cf023..2244b5b3 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/logical_deduction_three_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "logical_deduction_three_objects" "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_logical_deduction_three_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml b/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml index 54dc45f3..b735bb76 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/movie_recommendation.yaml @@ -1,5 +1,5 @@ "dataset_name": "movie_recommendation" "description": "Recommend movies similar to the given list of movies.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_movie_recommendation" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml b/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml index 494b94fe..3db4c192 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/multistep_arithmetic_two.yaml @@ -1,5 +1,5 @@ "dataset_name": "multistep_arithmetic_two" "description": "Solve multi-step arithmetic problems.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_multistep_arithmetic_two" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml b/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml index 3f107003..979b7269 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/navigate.yaml @@ -1,5 +1,5 @@ "dataset_name": "navigate" "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_navigate" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml b/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml index 8e1a675e..131f3418 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/object_counting.yaml @@ -1,5 +1,5 @@ "dataset_name": 
"object_counting" "description": "Questions that involve enumerating objects and asking the model to count them.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_object_counting" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml b/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml index c3c4138c..234b8392 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/penguins_in_a_table.yaml @@ -1,5 +1,5 @@ "dataset_name": "penguins_in_a_table" "description": "Answer questions about a table of penguins and their attributes.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_penguins_in_a_table" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml index bbe01119..73f5772d 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/reasoning_about_colored_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "reasoning_about_colored_objects" "description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_reasoning_about_colored_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml b/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml index b43e9414..96c1ab6e 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/ruin_names.yaml @@ -1,5 +1,5 @@ "dataset_name": "ruin_names" "description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_ruin_names" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml b/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml index 43ebe9f0..fcc7bad3 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/salient_translation_error_detection.yaml @@ -1,5 +1,5 @@ "dataset_name": "salient_translation_error_detection" "description": "Detect the type of error in an English translation of a German source sentence.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_salient_translation_error_detection" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml b/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml index df46e580..17fa9673 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/snarks.yaml @@ -1,5 +1,5 @@ "dataset_name": "snarks" -"description": "Determine which of two sentences is sarcastic.\n\n" +"description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" 
+"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_snarks" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml b/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml index fdbc3287..0c1edbd9 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/sports_understanding.yaml @@ -1,5 +1,5 @@ "dataset_name": "sports_understanding" "description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_sports_understanding" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml b/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml index 4a526778..c9cea8b0 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/temporal_sequences.yaml @@ -1,5 +1,5 @@ "dataset_name": "temporal_sequences" "description": "Task description: Answer questions about which times certain events could have occurred.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_temporal_sequences" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml index 39d96c56..c28d374f 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_five_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_five_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_tracking_shuffled_objects_five_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml index c1f42e8f..ed47f69b 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_seven_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_seven_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_tracking_shuffled_objects_seven_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml index 0e02323d..348fc541 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/tracking_shuffled_objects_three_objects.yaml @@ -1,5 +1,5 @@ "dataset_name": "tracking_shuffled_objects_three_objects" "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" 
"task": "bbh_flan_zeroshot_tracking_shuffled_objects_three_objects" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml b/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml index 179aab6a..d886cd26 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/web_of_lies.yaml @@ -1,5 +1,5 @@ "dataset_name": "web_of_lies" "description": "Evaluate a random boolean function expressed as a word problem.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_web_of_lies" diff --git a/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml b/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml index 9317b875..db12d15b 100644 --- a/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml +++ b/lm_eval/tasks/bbh/flan_zeroshot/word_sorting.yaml @@ -1,5 +1,5 @@ "dataset_name": "word_sorting" "description": "Sort a list of words.\n\n" "doc_to_text": "Q: {{input}}\nA:" -"include": "_template_yaml" +"include": "_flan_zeroshot_template_yaml" "task": "bbh_flan_zeroshot_word_sorting" -- GitLab From ec03783fde292bd79c8f4df3c80e280756ee3994 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 16:07:19 +0000 Subject: [PATCH 112/264] update for held-in tasks --- .../flan/prompt_templates/flan_anli.yaml | 36 +++++++++---------- .../flan/prompt_templates/flan_arc.yaml | 15 ++++---- .../flan/yaml_templates/cot_template_yaml | 2 -- .../flan/yaml_templates/held_in_template_yaml | 2 -- lm_eval/benchmarks/flan_anli.yaml | 17 +++++++++ lm_eval/benchmarks/flan_boolq.yaml | 7 ++++ lm_eval/benchmarks/flan_held_in.yaml | 14 ++++---- lm_eval/benchmarks/flan_rte.yaml | 7 ++++ 8 files changed, 64 insertions(+), 36 deletions(-) create mode 100644 lm_eval/benchmarks/flan_anli.yaml create mode 100644 lm_eval/benchmarks/flan_boolq.yaml create mode 100644 lm_eval/benchmarks/flan_rte.yaml diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml index 9b9f6705..6ff78840 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_anli.yaml @@ -1,29 +1,29 @@ # Flan Prompt Templates prompts: "template-0": - doc_to_text: "{{context}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nI think the answer is" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-1": - doc_to_text: "{{context}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-2": - doc_to_text: "{{context}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "{{premise}}\n\nCan we 
draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-3": - doc_to_text: "{{context}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-4": - doc_to_text: "{{context}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nThe answer is:" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-5": - doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{context}}\n\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-6": - doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{context}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-7": - doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{context}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "Can we draw the following hypothesis from the context (see options)? 
\n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" "template-8": - doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{context}}\nOPTIONS:\n- Yes\n- It\'s impossible to say\n- No" - doc_to_target: "{{['Yes', 'It\'s impossible to say', 'No'][label]}}" + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" diff --git a/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml b/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml index c9135a51..4ee34e65 100644 --- a/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml +++ b/lm_eval/benchmarks/flan/prompt_templates/flan_arc.yaml @@ -2,23 +2,22 @@ prompts: "template-0": doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" "template-1": doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" "template-2": doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" "template-3": doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" "template-4": doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" "template-5": doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" "template-6": doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{[choices.text][choices.label.index(answerKey)]}}" - + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" diff --git a/lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml b/lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml index 0cb0d16e..cbd40849 100644 --- a/lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml +++ b/lm_eval/benchmarks/flan/yaml_templates/cot_template_yaml @@ -6,8 +6,6 @@ metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true generation_kwargs: until: - "\n\n" diff --git a/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml b/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml index 2f4a4c84..e09daca2 100644 --- a/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml +++ 
b/lm_eval/benchmarks/flan/yaml_templates/held_in_template_yaml @@ -4,8 +4,6 @@ metric_list: - metric: exact_match aggregation: mean higher_is_better: true - ignore_case: true - ignore_punctuation: true generation_kwargs: until: - "</s>" diff --git a/lm_eval/benchmarks/flan_anli.yaml b/lm_eval/benchmarks/flan_anli.yaml new file mode 100644 index 00000000..d6201bb7 --- /dev/null +++ b/lm_eval/benchmarks/flan_anli.yaml @@ -0,0 +1,17 @@ +group: flan_anli +task: + - include: flan/yaml_templates/held_in_template_yaml + task: anli_r1 + dataset_path: anli + use_prompt: flan/prompt_templates/flan_anli.yaml:* + validation_split: dev_r1 + - include: flan/yaml_templates/held_in_template_yaml + task: anli_r2 + dataset_path: anli + use_prompt: flan/prompt_templates/flan_anli.yaml:* + validation_split: dev_r2 + - include: flan/yaml_templates/held_in_template_yaml + task: anli_r3 + dataset_path: anli + use_prompt: flan/prompt_templates/flan_anli.yaml:* + validation_split: dev_r3 diff --git a/lm_eval/benchmarks/flan_boolq.yaml b/lm_eval/benchmarks/flan_boolq.yaml new file mode 100644 index 00000000..f7ca4796 --- /dev/null +++ b/lm_eval/benchmarks/flan_boolq.yaml @@ -0,0 +1,7 @@ +group: flan_boolq +task: + - include: flan/yaml_templates/held_in_template_yaml + dataset_path: super_glue + dataset_name: boolq + use_prompt: flan/prompt_templates/flan_boolq.yaml:* + validation_split: validation diff --git a/lm_eval/benchmarks/flan_held_in.yaml b/lm_eval/benchmarks/flan_held_in.yaml index f6d62f03..f1965c9d 100644 --- a/lm_eval/benchmarks/flan_held_in.yaml +++ b/lm_eval/benchmarks/flan_held_in.yaml @@ -26,12 +26,14 @@ task: use_prompt: flan/prompt_templates/flan_anli.yaml:* validation_split: dev_r3 - include: flan/yaml_templates/held_in_template_yaml - task: ai2_arc - dataset_path: ARC-Easy - use_prompt: local:* + task: arc_easy + dataset_path: ai2_arc + dataset_name: ARC-Easy + use_prompt: flan/prompt_templates/flan_arc.yaml:* validation_split: validation - include: flan/yaml_templates/held_in_template_yaml - task: ai2_arc - dataset_path: ARC-Challange - use_prompt: local:* + task: arc_challenge + dataset_path: ai2_arc + dataset_name: ARC-Challenge + use_prompt: flan/prompt_templates/flan_arc.yaml:* validation_split: validation diff --git a/lm_eval/benchmarks/flan_rte.yaml b/lm_eval/benchmarks/flan_rte.yaml new file mode 100644 index 00000000..cf5832bf --- /dev/null +++ b/lm_eval/benchmarks/flan_rte.yaml @@ -0,0 +1,7 @@ +group: flan_rte +task: + - include: flan/yaml_templates/held_in_template_yaml + dataset_path: super_glue + dataset_name: rte + use_prompt: flan/prompt_templates/flan_rte.yaml:* + validation_split: validation -- GitLab From 8c39bfc7038511001f26fa14261716ce988d8591 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 4 Sep 2023 16:42:48 +0000 Subject: [PATCH 113/264] add codexglue --- lm_eval/tasks/code_x_glue/code-text/bleu.py | 213 ++++++++++++++++++ lm_eval/tasks/code_x_glue/code-text/go.yaml | 19 ++ lm_eval/tasks/code_x_glue/code-text/java.yaml | 19 ++ .../code_x_glue/code-text/javascript.yaml | 19 ++ lm_eval/tasks/code_x_glue/code-text/php.yaml | 19 ++ .../tasks/code_x_glue/code-text/python.yaml | 19 ++ lm_eval/tasks/code_x_glue/code-text/ruby.yaml | 19 ++ lm_eval/tasks/code_x_glue/code-text/utils.py | 14 ++ 8 files changed, 341 insertions(+) create mode 100644 lm_eval/tasks/code_x_glue/code-text/bleu.py create mode 100644 lm_eval/tasks/code_x_glue/code-text/go.yaml create mode 100644 lm_eval/tasks/code_x_glue/code-text/java.yaml create mode 100644
lm_eval/tasks/code_x_glue/code-text/javascript.yaml create mode 100644 lm_eval/tasks/code_x_glue/code-text/php.yaml create mode 100644 lm_eval/tasks/code_x_glue/code-text/python.yaml create mode 100644 lm_eval/tasks/code_x_glue/code-text/ruby.yaml create mode 100644 lm_eval/tasks/code_x_glue/code-text/utils.py diff --git a/lm_eval/tasks/code_x_glue/code-text/bleu.py b/lm_eval/tasks/code_x_glue/code-text/bleu.py new file mode 100644 index 00000000..50243474 --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/bleu.py @@ -0,0 +1,213 @@ +#!/usr/bin/python + +''' +This script was adapted from the original version by hieuhoang1972 which is part of MOSES. +''' + +# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $ + +'''Provides: + +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +score_cooked(alltest, n=4): Score a list of cooked test sentences. + +score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids. + +The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. +''' + +import sys, math, re, xml.sax.saxutils +import subprocess +import os + +# Added to bypass NIST-style pre-processing of hyp and ref files -- wade +nonorm = 0 + +preserve_case = False +eff_ref_len = "shortest" + +normalize1 = [ + ('<skipped>', ''), # strip "skipped" tags + (r'-\n', ''), # strip end-of-line hyphenation and join lines + (r'\n', ' '), # join lines +# (r'(\d)\s+(?=\d)', r'\1'), # join digits +] +normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] + +normalize2 = [ + (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing + (r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit + (r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit + (r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit +] +normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2] + +def normalize(s): + '''Normalize and tokenize text.
This is lifted from NIST mteval-v11a.pl.''' + # Added to bypass NIST-style pre-processing of hyp and ref files -- wade + if (nonorm): + return s.split() + if type(s) is not str: + s = " ".join(s) + # language-independent part: + for (pattern, replace) in normalize1: + s = re.sub(pattern, replace, s) + s = xml.sax.saxutils.unescape(s, {'&quot;':'"'}) + # language-dependent part (assuming Western languages): + s = " %s " % s + if not preserve_case: + s = s.lower() # this might not be identical to the original + for (pattern, replace) in normalize2: + s = re.sub(pattern, replace, s) + return s.split() + +def count_ngrams(words, n=4): + counts = {} + for k in range(1,n+1): + for i in range(len(words)-k+1): + ngram = tuple(words[i:i+k]) + counts[ngram] = counts.get(ngram, 0)+1 + return counts + +def cook_refs(refs, n=4): + '''Takes a list of reference sentences for a single segment + and returns an object that encapsulates everything that BLEU + needs to know about them.''' + + refs = [normalize(ref) for ref in refs] + maxcounts = {} + for ref in refs: + counts = count_ngrams(ref, n) + for (ngram,count) in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram,0), count) + return ([len(ref) for ref in refs], maxcounts) + +def cook_test(test, item, n=4): + '''Takes a test sentence and returns an object that + encapsulates everything that BLEU needs to know about it.''' + (reflens, refmaxcounts)=item + test = normalize(test) + result = {} + result["testlen"] = len(test) + + # Calculate effective reference sentence length. + + if eff_ref_len == "shortest": + result["reflen"] = min(reflens) + elif eff_ref_len == "average": + result["reflen"] = float(sum(reflens))/len(reflens) + elif eff_ref_len == "closest": + min_diff = None + for reflen in reflens: + if min_diff is None or abs(reflen-len(test)) < min_diff: + min_diff = abs(reflen-len(test)) + result['reflen'] = reflen + + result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)] + + result['correct'] = [0]*n + counts = count_ngrams(test, n) + for (ngram, count) in counts.items(): + result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count) + + return result + +def score_cooked(allcomps, n=4, ground=0, smooth=1): + totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n} + for comps in allcomps: + for key in ['testlen','reflen']: + totalcomps[key] += comps[key] + for key in ['guess','correct']: + for k in range(n): + totalcomps[key][k] += comps[key][k] + logbleu = 0.0 + all_bleus = [] + for k in range(n): + correct = totalcomps['correct'][k] + guess = totalcomps['guess'][k] + addsmooth = 0 + if smooth == 1 and k > 0: + addsmooth = 1 + logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min) + if guess == 0: + all_bleus.append(-10000000) + else: + all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess )) + + logbleu /= float(n) + all_bleus.insert(0, logbleu) + + brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1)) + for i in range(len(all_bleus)): + if i ==0: + all_bleus[i] += brevPenalty + all_bleus[i] = math.exp(all_bleus[i]) + return all_bleus + +def bleu(refs, candidate, ground=0, smooth=1): + refs = cook_refs(refs) + test = cook_test(candidate, refs) + return score_cooked([test], ground=ground, smooth=smooth) + +def splitPuncts(line): + return ' '.join(re.findall(r"[\w]+|[^\s\w]", line)) + +def computeMaps(predictions, goldfile): + predictionMap = {} + goldMap = {} + gf = open(goldfile, 'r') + + for row in
predictions: + cols = row.strip().split('\t') + if len(cols) == 1: + (rid, pred) = (cols[0], '') + else: + (rid, pred) = (cols[0], cols[1]) + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for row in gf: + (rid, pred) = row.split('\t') + if rid in predictionMap: # Only insert if the id exists for the method + if rid not in goldMap: + goldMap[rid] = [] + goldMap[rid].append(splitPuncts(pred.strip().lower())) + + sys.stderr.write('Total: ' + str(len(goldMap)) + '\n') + return (goldMap, predictionMap) + + +#m1 is the reference map +#m2 is the prediction map +def bleuFromMaps(m1, m2): + score = [0] * 5 + num = 0.0 + + for key in m1: + if key in m2: + bl = bleu(m1[key], m2[key][0]) + score = [ score[i] + bl[i] for i in range(0, len(bl))] + num += 1 + return [s * 100.0 / num for s in score] + + +def smoothed_bleu_4(references, predictions, **kwargs): + + predictionMap = {} + goldMap = {} + + for rid, pred in enumerate(predictions): + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for rid, row in enumerate(references): + goldMap[rid] = [splitPuncts(row.strip().lower())] + + return bleuFromMaps(goldMap, predictionMap)[0] + +if __name__ == '__main__': + reference_file = sys.argv[1] + predictions = [] + for row in sys.stdin: + predictions.append(row) + (goldMap, predictionMap) = computeMaps(predictions, reference_file) + print (bleuFromMaps(goldMap, predictionMap)[0]) diff --git a/lm_eval/tasks/code_x_glue/code-text/go.yaml b/lm_eval/tasks/code_x_glue/code-text/go.yaml new file mode 100644 index 00000000..3a4033c6 --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/go.yaml @@ -0,0 +1,19 @@ +group: + - codexglue_code2text +task: code2text_go +dataset_path: CM/codexglue_code2text_go +training_split: train +validation_split: validation +test_split: test +output_type: greedy_until +generation_kwargs: + num_beams: 10 + max_length: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True diff --git a/lm_eval/tasks/code_x_glue/code-text/java.yaml b/lm_eval/tasks/code_x_glue/code-text/java.yaml new file mode 100644 index 00000000..141673c9 --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/java.yaml @@ -0,0 +1,19 @@ +group: + - codexglue_code2text +task: code2text_java +dataset_path: CM/codexglue_code2text_java +training_split: train +validation_split: validation +test_split: test +output_type: greedy_until +generation_kwargs: + num_beams: 10 + max_length: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True diff --git a/lm_eval/tasks/code_x_glue/code-text/javascript.yaml b/lm_eval/tasks/code_x_glue/code-text/javascript.yaml new file mode 100644 index 00000000..c537e50d --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/javascript.yaml @@ -0,0 +1,19 @@ +group: + - codexglue_code2text +task: code2text_javascript +dataset_path: CM/codexglue_code2text_javascript +training_split: train +validation_split: validation +test_split: test +output_type: greedy_until +generation_kwargs: + num_beams: 10 + max_length: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True diff --git
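# A quick sketch of how the metric above is invoked (illustrative strings, not from the dataset): smoothed_bleu_4 takes parallel lists of reference and predicted docstrings and returns the corpus-level smoothed BLEU-4 on a 0-100 scale.
#
#   refs = ["returns the sum of two numbers ."]
#   preds = ["return the sum of two numbers ."]
#   score = smoothed_bleu_4(refs, preds)  # float in [0, 100]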
a/lm_eval/tasks/code_x_glue/code-text/php.yaml b/lm_eval/tasks/code_x_glue/code-text/php.yaml new file mode 100644 index 00000000..9137bdaf --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/php.yaml @@ -0,0 +1,19 @@ +group: + - codexglue_code2text +task: code2text_php +dataset_path: CM/codexglue_code2text_php +training_split: train +validation_split: validation +test_split: test +output_type: greedy_until +generation_kwargs: + num_beams: 10 + max_length: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True diff --git a/lm_eval/tasks/code_x_glue/code-text/python.yaml b/lm_eval/tasks/code_x_glue/code-text/python.yaml new file mode 100644 index 00000000..a98bfdba --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/python.yaml @@ -0,0 +1,19 @@ +group: + - codexglue_code2text +task: code2text_python +dataset_path: CM/codexglue_code2text_python +training_split: train +validation_split: validation +test_split: test +output_type: greedy_until +generation_kwargs: + num_beams: 10 + max_length: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True diff --git a/lm_eval/tasks/code_x_glue/code-text/ruby.yaml b/lm_eval/tasks/code_x_glue/code-text/ruby.yaml new file mode 100644 index 00000000..d6562d4c --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/ruby.yaml @@ -0,0 +1,19 @@ +group: + - codexglue_code2text +task: code2text_ruby +dataset_path: CM/codexglue_code2text_ruby +training_split: train +validation_split: validation +test_split: test +output_type: greedy_until +generation_kwargs: + num_beams: 10 + max_length: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True diff --git a/lm_eval/tasks/code_x_glue/code-text/utils.py b/lm_eval/tasks/code_x_glue/code-text/utils.py new file mode 100644 index 00000000..89cbbdf3 --- /dev/null +++ b/lm_eval/tasks/code_x_glue/code-text/utils.py @@ -0,0 +1,14 @@ + +def doc_to_text(doc): + + inputs = ' '.join(doc['code_tokens']).replace('\n',' ') + inputs = ' '.join(inputs.strip().split()) + + return inputs + +def doc_to_target(doc): + + targets = ' '.join(doc['docstring_tokens']).replace('\n','') + targets = ' '.join(targets.strip().split()) + + return targets \ No newline at end of file -- GitLab From fdb601a547e45eef0ca10b5f2a8487eb5f5a0a89 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Mon, 4 Sep 2023 17:43:15 +0000 Subject: [PATCH 114/264] add top-level import --- lm_eval/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lm_eval/__init__.py b/lm_eval/__init__.py index e69de29b..317c0291 100644 --- a/lm_eval/__init__.py +++ b/lm_eval/__init__.py @@ -0,0 +1 @@ +from .evaluator import evaluate, simple_evaluate -- GitLab From f96f330f6698e7062815811c6c01fc041a189380 Mon Sep 17 00:00:00 2001 From: Hailey Schoelkopf <65563625+haileyschoelkopf@users.noreply.github.com> Date: Mon, 4 Sep 2023 14:48:05 -0400 Subject: [PATCH 115/264] placate precommit --- lm_eval/models/huggingface.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 2611c3c1..19018890 100644 ---
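# Worked example for the utils.py helpers above (invented doc, not from the dataset):
#
#   doc = {"code_tokens": ["def", "add", "(", "a", ",", "b", ")", ":"],
#          "docstring_tokens": ["Adds", "two", "numbers", "."]}
#   doc_to_text(doc)    # -> "def add ( a , b ) :"
#   doc_to_target(doc)  # -> "Adds two numbers ."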
a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -289,14 +289,15 @@ class HFLM(LM): "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes`. If the desired GPU is being used, this message is safe to ignore." ) else: - assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU], "Unsupported distributed type provided. Only DDP and FSDP are supported." + assert accelerator.distributed_type in [ + DistributedType.FSDP, + DistributedType.MULTI_GPU + ], "Unsupported distributed type provided. Only DDP and FSDP are supported." if accelerator.distributed_type == DistributedType.FSDP: - self._model = accelerator.prepare( - self.model - ) + self._model = accelerator.prepare(self.model) else: self._model = accelerator.prepare_model( - self.model, evaluation_mode = True + self.model, evaluation_mode=True ) self._device = torch.device(f"cuda:{accelerator.local_process_index}") self.accelerator = accelerator -- GitLab From 6cc1ae7c0b0cb535af593fe1fad5d84912d0c59d Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Tue, 5 Sep 2023 01:10:00 +0100 Subject: [PATCH 116/264] consolidated tasks testing --- .github/workflows/new_tasks.yml | 10 +- tests/extra/__init__.py | 0 tests/extra/test_new_tasks.py | 129 ----------- tests/extra/test_utils.py | 23 -- tests/test_tasks.py | 215 +++++++++--------- .../{extra/utilities_testing.py => utils.py} | 23 +- 6 files changed, 129 insertions(+), 271 deletions(-) delete mode 100644 tests/extra/__init__.py delete mode 100644 tests/extra/test_new_tasks.py delete mode 100644 tests/extra/test_utils.py rename tests/{extra/utilities_testing.py => utils.py} (63%) diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml index a14e251c..edafb5b1 100644 --- a/.github/workflows/new_tasks.yml +++ b/.github/workflows/new_tasks.yml @@ -3,10 +3,10 @@ name: Tasks Modified on: push: branches: - - big-refactor + - 'big-refactor*' pull_request: branches: - - big-refactor + - 'big-refactor*' workflow_dispatch: # comment/edit out the above to stop/change the triggers jobs: @@ -18,7 +18,7 @@ jobs: - name: checkout uses: actions/checkout@v3 with: - fetch-depth: 0 # OR "2" -> To retrieve the preceding commit. + fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. # Uses the tj-actions/changed-files@v37 action to check for changes. 
# Outputs provided here: https://github.com/tj-actions/changed-files#outputs @@ -63,10 +63,10 @@ jobs: - name: Test with pytest # if new tasks are added, run tests on them if: steps.changed-tasks.outputs.tasks_any_modified == 'true' - run: python -m pytest tests/extra/test_new_tasks.py -s -vv -n=auto + run: python -m pytest tests/test_tasks.py -s -vv -n=auto # if api is modified, run tests on it - name: Test more tasks with pytest env: API: true if: steps.changed-tasks.outputs.api_any_modified == 'true' - run: python -m pytest tests/extra/test_new_tasks.py -s -vv -n=auto + run: python -m pytest tests/test_tasks.py -s -vv -n=auto diff --git a/tests/extra/__init__.py b/tests/extra/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/extra/test_new_tasks.py b/tests/extra/test_new_tasks.py deleted file mode 100644 index 683584fd..00000000 --- a/tests/extra/test_new_tasks.py +++ /dev/null @@ -1,129 +0,0 @@ -import pytest -from itertools import islice -import lm_eval.tasks as tasks -from .utilities_testing import load_changed_files, parser -from typing import List -from lm_eval.api.task import ConfigurableTask -import os - - -# GitHub CI -def new_tasks() -> List[str]: - FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt" - if os.path.exists(FILENAME): - # If tasks folder has changed then we get the list of files from FILENAME - # and parse the yaml files to get the task names. - return parser(load_changed_files(FILENAME)) - elif os.getenv("API") is not None: - # Or if API has changed then we set the ENV variable API to True - # and run given tasks. - return ["arc_easy", "hellaswag", "piqa", "wikitext"] - # if both not true just do arc_easy - else: - return ["arc_easy"] - - -def get_task_class() -> List[ConfigurableTask]: - task_name = new_tasks() - x = [cls for name, cls in tasks.TASK_REGISTRY.items() if name in task_name] - return x - - -@pytest.fixture() -def limit() -> int: - return 10 - - -# Tests -@pytest.mark.parametrize("task_class", get_task_class()) -class TestNewTasks: - def test_download(self, task_class: ConfigurableTask): - task_class().download() - assert task_class().dataset is not None - - def test_has_training_docs(self, task_class: ConfigurableTask): - assert task_class().has_training_docs() in [True, False] - - def test_check_training_docs(self, task_class: ConfigurableTask): - task = task_class() - if task.has_training_docs(): - assert task._config["training_split"] is not None - - def test_has_validation_docs(self, task_class): - assert task_class().has_validation_docs() in [True, False] - - def test_check_validation_docs(self, task_class): - task = task_class() - if task.has_validation_docs(): - assert task._config["validation_split"] is not None - - def test_has_test_docs(self, task_class): - assert task_class().has_test_docs() in [True, False] - - def test_check_test_docs(self, task_class): - task = task_class() - if task.has_test_docs(): - assert task._config["test_split"] is not None - - def test_should_decontaminate(self, task_class): - task = task_class() - assert task.should_decontaminate() in [True, False] - if task.should_decontaminate(): - assert task._config["doc_to_decontamination_query"] is not None - - def test_doc_to_text(self, task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - _array = [task.doc_to_text(doc) for doc in arr] - # space convention; allow txt to have length 0 for perplexity-like 
tasks since the model tacks an <|endoftext|> on - assert all( - isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True) - for x in _array - ) - - def test_create_choices(self, task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - if "multiple_choice" in task._config.output_type: - _array = [task.doc_to_choice(doc) for doc in arr] - # assert all(len(x) == 4 for x in _array) - assert all(isinstance(x, list) for x in _array) - assert all(isinstance(x[0], str) for x in _array) - - def test_doc_to_target(self, task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - _array_target = [task.doc_to_target(doc) for doc in arr] - if task._config.output_type == "multiple_choice": - assert all(isinstance(label, int) for label in _array_target) - # _array_text = [task.doc_to_text(doc) for doc in arr] - # Not working - # assert all(tgt[0] == " " or txt[-1] == "\n" if len(txt) != 0 else True for txt, tgt in zip(_array_text, _array_target)) - - def test_build_all_requests(self, task_class, limit): - task_class().build_all_requests(rank=1, limit=limit, world_size=1) - assert task_class.instances is not None - - # ToDO: Add proper testing - def test_construct_requests(self, task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr] - # assert all(isinstance(doc, list) for doc in requests) - assert len(requests) == limit if limit else True diff --git a/tests/extra/test_utils.py b/tests/extra/test_utils.py deleted file mode 100644 index 7bbeb18d..00000000 --- a/tests/extra/test_utils.py +++ /dev/null @@ -1,23 +0,0 @@ -import json -from typing import List -from lm_eval.utils import load_yaml_config -from pathlib import Path - - -FILE_PATH = file_path = ".github/outputs/tasks_all_changed_and_modified_files.txt" - - -def load_changed_files(file_path: str = FILE_PATH) -> List[str]: - with open(file_path, "r") as f: - return [l for line in f.readlines() for l in line.strip().split(" ")] - - -def parser(full_path: List[str]) -> List[str]: - _output = set() - for x in full_path: - if x.endswith(".yaml"): - _output.add(load_yaml_config(x)["task"]) - elif x.endswith(".py"): - path = [str(x) for x in (list(Path(x).parent.glob("*.yaml")))] - _output |= {load_yaml_config(x)["task"] for x in path} - return list(_output) diff --git a/tests/test_tasks.py b/tests/test_tasks.py index e7244d5c..70875469 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -1,15 +1,23 @@ from itertools import islice import pytest -from typing import List +from .utils import new_tasks import lm_eval.tasks as tasks from lm_eval.api.task import ConfigurableTask -# Using fixtures to get the task class and limit -@pytest.fixture() -def task_class() -> ConfigurableTask: - task_name = ["arc_easy"] - x = [cls for name, cls in tasks.TASK_REGISTRY.items() if name in task_name] - return x[0] + +# Default Task +TASKS = ["arc_easy"] + + +def task_class(): + global TASKS + # CI: new_tasks checks if any modifications have been made + task_classes = new_tasks() + # Check if task_classes is empty + if task_classes: + return [tasks.TASK_REGISTRY.get(x)() for x in task_classes] + else: + return 
[tasks.TASK_REGISTRY.get(x)() for x in TASKS] @pytest.fixture() @@ -18,109 +26,96 @@ def limit() -> int: # Tests - - -def test_download(task_class: ConfigurableTask): - task_class().download() - assert task_class().dataset is not None - - -def test_has_training_docs(task_class: ConfigurableTask): - assert task_class().has_training_docs() in [True, False] - - -def test_check_training_docs(task_class: ConfigurableTask): - task = task_class() - if task.has_training_docs(): - assert task._config["training_split"] is not None - - -def test_has_validation_docs(task_class): - assert task_class().has_validation_docs() in [True, False] - - -def test_check_validation_docs(task_class): - task = task_class() - if task.has_validation_docs(): - assert task._config["validation_split"] is not None - - -def test_has_test_docs(task_class): - assert task_class().has_test_docs() in [True, False] - - -def test_check_test_docs(task_class): - task = task_class() - if task.has_test_docs(): - assert task._config["test_split"] is not None - - -def test_should_decontaminate(task_class): - task = task_class() - assert task.should_decontaminate() in [True, False] - if task.should_decontaminate(): - assert task._config["doc_to_decontamination_query"] is not None - - -def test_doc_to_text(task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - _array = [task.doc_to_text(doc) for doc in arr] - # space convention; allow txt to have length 0 for perplexity-like tasks since the model tacks an <|endoftext|> on - assert all( - isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True) for x in _array - ) - - -def test_create_choices(task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - if "multiple_choice" in task._config.output_type: - _array = [task.doc_to_choice(doc) for doc in arr] - # assert all(len(x) == 4 for x in _array) - assert all(isinstance(x, list) for x in _array) - assert all(isinstance(x[0], str) for x in _array) - - -def test_doc_to_target(task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - _array_target = [task.doc_to_target(doc) for doc in arr] - if task._config.output_type == "multiple_choice": - assert all(isinstance(label, int) for label in _array_target) - # _array_text = [task.doc_to_text(doc) for doc in arr] - # Not working - # assert all(tgt[0] == " " or txt[-1] == "\n" if len(txt) != 0 else True for txt, tgt in zip(_array_text, _array_target)) - - -def test_build_all_requests(task_class, limit): - task_class().build_all_requests(rank=1, limit=limit, world_size=1) - assert task_class.instances is not None - - -# ToDO: Add proper testing -def test_construct_requests(task_class, limit): - task = task_class() - arr = ( - list(islice(task.test_docs(), limit)) - if task.has_test_docs() - else list(islice(task.validation_docs(), limit)) - ) - requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr] - # assert all(isinstance(doc, list) for doc in requests) - assert len(requests) == limit if limit else True +@pytest.mark.parametrize("task_class", task_class()) +class TestNewTasks: + def test_download(self, task_class: ConfigurableTask): + task_class.download() + assert task_class.dataset is not None + + def 
test_has_training_docs(self, task_class: ConfigurableTask): + assert task_class.has_training_docs() in [True, False] + + def test_check_training_docs(self, task_class: ConfigurableTask): + if task_class.has_training_docs(): + assert task_class._config["training_split"] is not None + + def test_has_validation_docs(self, task_class): + assert task_class.has_validation_docs() in [True, False] + + def test_check_validation_docs(self, task_class): + if task_class.has_validation_docs(): + assert task_class._config["validation_split"] is not None + + def test_has_test_docs(self, task_class): + assert task_class.has_test_docs() in [True, False] + + def test_check_test_docs(self, task_class): + task = task_class + if task.has_test_docs(): + assert task._config["test_split"] is not None + + def test_should_decontaminate(self, task_class): + task = task_class + assert task.should_decontaminate() in [True, False] + if task.should_decontaminate(): + assert task._config["doc_to_decontamination_query"] is not None + + def test_doc_to_text(self, task_class, limit): + task = task_class + arr = ( + list(islice(task.test_docs(), limit)) + if task.has_test_docs() + else list(islice(task.validation_docs(), limit)) + ) + _array = [task.doc_to_text(doc) for doc in arr] + # space convention; allow txt to have length 0 for perplexity-like tasks since the model tacks an <|endoftext|> on + assert all( + isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True) + for x in _array + ) + + def test_create_choices(self, task_class, limit): + task = task_class + arr = ( + list(islice(task.test_docs(), limit)) + if task.has_test_docs() + else list(islice(task.validation_docs(), limit)) + ) + if "multiple_choice" in task._config.output_type: + _array = [task.doc_to_choice(doc) for doc in arr] + # assert all(len(x) == 4 for x in _array) + assert all(isinstance(x, list) for x in _array) + assert all(isinstance(x[0], str) for x in _array) + + def test_doc_to_target(self, task_class, limit): + task = task_class + arr = ( + list(islice(task.test_docs(), limit)) + if task.has_test_docs() + else list(islice(task.validation_docs(), limit)) + ) + _array_target = [task.doc_to_target(doc) for doc in arr] + if task._config.output_type == "multiple_choice": + assert all(isinstance(label, int) for label in _array_target) + # _array_text = [task.doc_to_text(doc) for doc in arr] + # Not working + # assert all(tgt[0] == " " or txt[-1] == "\n" if len(txt) != 0 else True for txt, tgt in zip(_array_text, _array_target)) + + def test_build_all_requests(self, task_class, limit): + task_class.build_all_requests(rank=1, limit=limit, world_size=1) + assert task_class.instances is not None + + # ToDO: Add proper testing + def test_construct_requests(self, task_class, limit): + task = task_class + arr = ( + list(islice(task.test_docs(), limit)) + if task.has_test_docs() + else list(islice(task.validation_docs(), limit)) + ) + requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr] + # assert all(isinstance(doc, list) for doc in requests) + assert len(requests) == limit if limit else True # def test_create_choices(task_class): diff --git a/tests/extra/utilities_testing.py b/tests/utils.py similarity index 63% rename from tests/extra/utilities_testing.py rename to tests/utils.py index 874a6334..089bc3ca 100644 --- a/tests/extra/utilities_testing.py +++ b/tests/utils.py @@ -1,20 +1,20 @@ -import json from typing import List from lm_eval.utils import load_yaml_config from pathlib import Path -import sys +from typing import 
Union +import os + +# {{{CI}}} # This is the path where the output for the changed files for the tasks folder is stored # FILE_PATH = file_path = ".github/outputs/tasks_all_changed_and_modified_files.txt" - # reads a text file and returns a list of words # used to read the output of the changed txt from tj-actions/changed-files def load_changed_files(file_path: str) -> List[str]: with open(file_path, "r") as f: content = f.read() words_list = [x for x in content.split()] - sys.stdout.write(f"list of files: {words_list}") return words_list @@ -30,3 +30,18 @@ def parser(full_path: List[str]) -> List[str]: path = [str(x) for x in (list(Path(x).parent.glob("*.yaml")))] _output |= {load_yaml_config(x)["task"] for x in path} return list(_output) + + +def new_tasks() -> Union[list[str], None]: + FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt" + if os.path.exists(FILENAME): + # If tasks folder has changed then we get the list of files from FILENAME + # and parse the yaml files to get the task names. + return parser(load_changed_files(FILENAME)) + elif os.getenv("API") is not None: + # Or if API has changed then we set the ENV variable API to True + # and run given tasks. + return ["arc_easy", "hellaswag", "piqa", "wikitext"] + # if both not true just do arc_easy + else: + return -- GitLab From 7601d82827abfeb60288e1c561e8ac518a5b4d0d Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 5 Sep 2023 14:11:24 +0000 Subject: [PATCH 117/264] edit to fix cot filter --- .../tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml | 2 +- .../bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml index 34d7f066..680c2533 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml @@ -18,5 +18,5 @@ filter_list: - name: "get-answer" filter: - function: "regex" - regex_pattern: "(?<=The answer is )(.*)(?=.)" + regex_pattern: "(?<=the answer is )(.*)(?=.)" - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml index bda6eb96..66ab12e3 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml @@ -18,5 +18,5 @@ filter_list: - name: "get-answer" filter: - function: "regex" - regex_pattern: "(?<=The answer is )(.*)(?=.)" + regex_pattern: "(?<=the answer is )(.*)(?=.)" - function: "take_first" \ No newline at end of file -- GitLab From a7286607ee29e2002382140a68dd3f4dba405b60 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 5 Sep 2023 15:21:08 +0000 Subject: [PATCH 118/264] change words/bytes calc for wikitext --- lm_eval/tasks/wikitext/preprocess_wikitext.py | 12 ++++++++++++ lm_eval/tasks/wikitext/wikitext.yaml | 1 + 2 files changed, 13 insertions(+) diff --git a/lm_eval/tasks/wikitext/preprocess_wikitext.py b/lm_eval/tasks/wikitext/preprocess_wikitext.py index e2d315d0..e5dff22b 100644 --- a/lm_eval/tasks/wikitext/preprocess_wikitext.py +++ b/lm_eval/tasks/wikitext/preprocess_wikitext.py @@ -34,3 +34,15 @@ def wikitext_detokenizer(doc): string = string.replace(" 's", "'s") return string + + +def process_results(doc, results): + 
(loglikelihood,) = results + # IMPORTANT: wikitext counts number of words in *original doc before detokenization* + _words = len(re.split(r"\s+", doc["page"])) + _bytes = len(doc["page"].encode("utf-8")) + return { + "word_perplexity": (loglikelihood, _words), + "byte_perplexity": (loglikelihood, _bytes), + "bits_per_byte": (loglikelihood, _bytes), + } diff --git a/lm_eval/tasks/wikitext/wikitext.yaml b/lm_eval/tasks/wikitext/wikitext.yaml index faa7c39e..e6ab913a 100644 --- a/lm_eval/tasks/wikitext/wikitext.yaml +++ b/lm_eval/tasks/wikitext/wikitext.yaml @@ -7,6 +7,7 @@ validation_split: validation test_split: test doc_to_text: "" doc_to_target: !function preprocess_wikitext.wikitext_detokenizer +process_results: !function preprocess_wikitext.process_results should_decontaminate: true doc_to_decontamination_query: "{{page}}" metric_list: -- GitLab From 9bca36a9b6334bb8891aa97b1709720199d52595 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 5 Sep 2023 15:43:22 +0000 Subject: [PATCH 119/264] pre-commit --- lm_eval/models/huggingface.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 7dec4a89..7e3c73b3 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -296,14 +296,14 @@ class HFLM(LM): ) else: assert accelerator.distributed_type in [ - DistributedType.FSDP, - DistributedType.MULTI_GPU + DistributedType.FSDP, + DistributedType.MULTI_GPU, ], "Unsupported distributed type provided. Only DDP and FSDP are supported." if accelerator.distributed_type == DistributedType.FSDP: self._model = accelerator.prepare(self.model) else: self._model = accelerator.prepare_model( - self.model, evaluation_mode=True + self.model, evaluation_mode=True ) self._device = torch.device(f"cuda:{accelerator.local_process_index}") self.accelerator = accelerator -- GitLab From 01ad787d3b9f26cc09e4abd01542fb2d8eb7c3c1 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 5 Sep 2023 16:05:39 +0000 Subject: [PATCH 120/264] format for pre-commit --- lm_eval/api/task.py | 1 - lm_eval/models/huggingface.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index ef4d91f7..c0a0d36f 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -88,7 +88,6 @@ class TaskConfig(dict): metadata: str = None # by default, not used in the code. allows for users to pass arbitrary info to tasks - def __post_init__(self) -> None: if "." 
in self.dataset_path: import inspect diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index bb51c7d5..e960654d 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -432,9 +432,9 @@ class HFLM(LM): return encoding def tok_batch_encode( - self, - strings: List[str], - padding_side: str = "left", + self, + strings: List[str], + padding_side: str = "left", left_truncate_len: int = None, truncation: bool = False, ): -- GitLab From f1e62d3650542c13b3e1fd9fbee4a4c0757c4f4d Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Tue, 5 Sep 2023 01:56:30 +0100 Subject: [PATCH 121/264] added torch deterministic --- tests/models/test_huggingface.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index f5bf217c..b50ddd53 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -4,10 +4,11 @@ import numpy as np from lm_eval.models.huggingface import HFLM from lm_eval.api.instance import Instance import lm_eval.tasks as tasks +import torch class Test_HFLM: - + torch.use_deterministic_algorithms(True) multiple_choice_task = tasks.TASK_REGISTRY.get("arc_easy")() # type: ignore multiple_choice_task.build_all_requests(limit=10, rank=0, world_size=1) MULTIPLE_CH: list[Instance] = multiple_choice_task.instances -- GitLab From 96c60cf6ec2ff91310a9000002afd6574201dc43 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Wed, 6 Sep 2023 01:59:00 +0100 Subject: [PATCH 122/264] artifact loglikelihood results --- .github/workflows/unit_tests.yml | 6 ++++++ tests/models/test_huggingface.py | 10 +++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index d5da1a55..be9f3fa9 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -70,3 +70,9 @@ jobs: # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Test with pytest run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/tests_master --ignore=tests/extra + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: output_results + path: | + test_logs/* diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index b50ddd53..f5befffa 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -1,14 +1,17 @@ from __future__ import annotations import pytest +from pathlib import Path import numpy as np from lm_eval.models.huggingface import HFLM from lm_eval.api.instance import Instance import lm_eval.tasks as tasks +import sys import torch class Test_HFLM: torch.use_deterministic_algorithms(True) + version_minor = sys.version_info.minor multiple_choice_task = tasks.TASK_REGISTRY.get("arc_easy")() # type: ignore multiple_choice_task.build_all_requests(limit=10, rank=0, world_size=1) MULTIPLE_CH: list[Instance] = multiple_choice_task.instances @@ -91,7 +94,12 @@ class Test_HFLM: def test_logliklihood(self) -> None: res = self.LM.loglikelihood(self.MULTIPLE_CH) _RES, _res = self.MULTIPLE_CH_RES, [r[0] for r in res] - # change atol in case of consistent failure + # log samples to CI + file_path = Path(f"test_logs/outputs_log_{self.version_minor}.txt") + file_path.mkdir(parents=True, exist_ok=True) + file_path = file_path.resolve() + with open(file_path, "w") as f: + f.write("\n".join(str(x) for x in _res)) 
assert np.allclose(_res, _RES, atol=1e-4) # check indices for Multiple Choice argmax_RES, argmax_res = np.argmax( -- GitLab From dfc47bd515d6875b756b92931c02661c698e1f45 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Wed, 6 Sep 2023 02:03:16 +0100 Subject: [PATCH 123/264] changed atol --- tests/models/test_huggingface.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index f5befffa..0ecd02aa 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -95,12 +95,14 @@ class Test_HFLM: res = self.LM.loglikelihood(self.MULTIPLE_CH) _RES, _res = self.MULTIPLE_CH_RES, [r[0] for r in res] # log samples to CI - file_path = Path(f"test_logs/outputs_log_{self.version_minor}.txt") - file_path.mkdir(parents=True, exist_ok=True) + dir_path = Path("test_logs") + dir_path.mkdir(parents=True, exist_ok=True) + + file_path = dir_path / f"outputs_log_{self.version_minor}.txt" file_path = file_path.resolve() with open(file_path, "w") as f: f.write("\n".join(str(x) for x in _res)) - assert np.allclose(_res, _RES, atol=1e-4) + assert np.allclose(_res, _RES, atol=1e-2) # check indices for Multiple Choice argmax_RES, argmax_res = np.argmax( np.array(_RES).reshape(-1, 4), axis=1 -- GitLab From 00c4ffff9ada116ed58b46c137a204fbcad62e36 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Wed, 6 Sep 2023 21:39:08 +0700 Subject: [PATCH 124/264] Update README.md Crossed WMT off. --- lm_eval/tasks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index 02fad1b7..586aa028 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -42,7 +42,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [ ] GSM8k - [x] Arithmetic - [ ] MMMLU (Hailey) -- [ ] Translation (WMT) suite (Hailey) +- [x] Translation (WMT) suite - [x] Unscramble - [x] ~~Pile (perplexity)~~ - [x] BLiMP -- GitLab From fd2790894642292f7b480e10a66e16a33e22335c Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 6 Sep 2023 15:40:26 +0000 Subject: [PATCH 125/264] tidy up --- lm_eval/tasks/mgsm/README.md | 32 +++++++++++++++---- lm_eval/tasks/mgsm/{ => direct}/direct_yaml | 2 +- .../mgsm_direct_bn.yaml} | 0 .../mgsm_direct_de.yaml} | 2 +- .../mgsm_direct_en.yaml} | 2 +- .../mgsm_direct_es.yaml} | 2 +- .../mgsm_direct_fr.yaml} | 2 +- .../mgsm_direct_ja.yaml} | 2 +- .../mgsm_direct_ru.yaml} | 2 +- .../mgsm_direct_sw.yaml} | 2 +- .../mgsm_direct_te.yaml} | 2 +- .../mgsm_direct_th.yaml} | 2 +- .../mgsm_direct_zh.yaml} | 2 +- lm_eval/tasks/mgsm/{ => native_cot}/cot_yaml | 2 +- .../mgsm_cot_native_bn.yaml} | 0 .../mgsm_cot_native_de.yaml} | 0 .../mgsm_cot_native_en.yaml} | 0 .../mgsm_cot_native_es.yaml} | 0 .../mgsm_cot_native_fr.yaml} | 0 .../mgsm_cot_native_ja.yaml} | 0 .../mgsm_cot_native_ru.yaml} | 0 .../mgsm_cot_native_sw.yaml} | 0 .../mgsm_cot_native_te.yaml} | 0 .../mgsm_cot_native_th.yaml} | 0 .../mgsm_cot_native_zh.yaml} | 0 25 files changed, 38 insertions(+), 18 deletions(-) rename lm_eval/tasks/mgsm/{ => direct}/direct_yaml (97%) rename lm_eval/tasks/mgsm/{mgsm_bn_direct.yaml => direct/mgsm_direct_bn.yaml} (100%) rename lm_eval/tasks/mgsm/{mgsm_de_direct.yaml => direct/mgsm_direct_de.yaml} (93%) rename lm_eval/tasks/mgsm/{mgsm_en_direct.yaml => direct/mgsm_direct_en.yaml} (93%) rename lm_eval/tasks/mgsm/{mgsm_es_direct.yaml => direct/mgsm_direct_es.yaml} (93%) rename 
lm_eval/tasks/mgsm/{mgsm_fr_direct.yaml => direct/mgsm_direct_fr.yaml} (93%)
 rename lm_eval/tasks/mgsm/{mgsm_ja_direct.yaml => direct/mgsm_direct_ja.yaml} (93%)
 rename lm_eval/tasks/mgsm/{mgsm_ru_direct.yaml => direct/mgsm_direct_ru.yaml} (93%)
 rename lm_eval/tasks/mgsm/{mgsm_sw_direct.yaml => direct/mgsm_direct_sw.yaml} (93%)
 rename lm_eval/tasks/mgsm/{mgsm_te_direct.yaml => direct/mgsm_direct_te.yaml} (93%)
 rename lm_eval/tasks/mgsm/{mgsm_th_direct.yaml => direct/mgsm_direct_th.yaml} (93%)
 rename lm_eval/tasks/mgsm/{mgsm_zh_direct.yaml => direct/mgsm_direct_zh.yaml} (93%)
 rename lm_eval/tasks/mgsm/{ => native_cot}/cot_yaml (96%)
 rename lm_eval/tasks/mgsm/{mgsm_bn_native-cot.yaml => native_cot/mgsm_cot_native_bn.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_de_native-cot.yaml => native_cot/mgsm_cot_native_de.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_en_native-cot.yaml => native_cot/mgsm_cot_native_en.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_es_native-cot.yaml => native_cot/mgsm_cot_native_es.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_fr_native-cot.yaml => native_cot/mgsm_cot_native_fr.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_ja_native-cot.yaml => native_cot/mgsm_cot_native_ja.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_ru_native-cot.yaml => native_cot/mgsm_cot_native_ru.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_sw_native-cot.yaml => native_cot/mgsm_cot_native_sw.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_te_native-cot.yaml => native_cot/mgsm_cot_native_te.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_th_native-cot.yaml => native_cot/mgsm_cot_native_th.yaml} (100%)
 rename lm_eval/tasks/mgsm/{mgsm_zh_native-cot.yaml => native_cot/mgsm_cot_native_zh.yaml} (100%)

diff --git a/lm_eval/tasks/mgsm/README.md b/lm_eval/tasks/mgsm/README.md
index 475b55c3..90f8e44b 100644
--- a/lm_eval/tasks/mgsm/README.md
+++ b/lm_eval/tasks/mgsm/README.md
@@ -53,12 +53,32 @@ Homepage: https://github.com/google-research/url-nlp/tree/main/mgsm
 
 #### Groups
 
-* `mgsm`
-
-#### Tasks
-
-* `task_name`: `1-sentence description of what this particular task does`
-* `task_name2`: ...
+* `mgsm_direct`: Direct question
+  * `mgsm_direct_bn`: Bengali
+  * `mgsm_direct_de`: German
+  * `mgsm_direct_en`: English
+  * `mgsm_direct_es`: Spanish
+  * `mgsm_direct_fr`: French
+  * `mgsm_direct_ja`: Japanese
+  * `mgsm_direct_ru`: Russian
+  * `mgsm_direct_sw`: Swahili
+  * `mgsm_direct_te`: Telugu
+  * `mgsm_direct_th`: Thai
+  * `mgsm_direct_zh`: Chinese
+* `mgsm_cot_native`: Question with Answer followed by CoT prompt in the same language as the dataset.
+  * `mgsm_cot_native_bn`: Bengali
+  * `mgsm_cot_native_de`: German
+  * `mgsm_cot_native_en`: English
+  * `mgsm_cot_native_es`: Spanish
+  * `mgsm_cot_native_fr`: French
+  * `mgsm_cot_native_ja`: Japanese
+  * `mgsm_cot_native_ru`: Russian
+  * `mgsm_cot_native_sw`: Swahili
+  * `mgsm_cot_native_te`: Telugu
+  * `mgsm_cot_native_th`: Thai
+  * `mgsm_cot_native_zh`: Chinese
+
+Exemplar Samples: https://github.com/google-research/url-nlp/blob/main/mgsm/exemplars.py
 
 ### Checklist
 
diff --git a/lm_eval/tasks/mgsm/direct_yaml b/lm_eval/tasks/mgsm/direct/direct_yaml
similarity index 97%
rename from lm_eval/tasks/mgsm/direct_yaml
rename to lm_eval/tasks/mgsm/direct/direct_yaml
index 17649d2d..6eae3257 100644
--- a/lm_eval/tasks/mgsm/direct_yaml
+++ b/lm_eval/tasks/mgsm/direct/direct_yaml
@@ -1,7 +1,7 @@
 # This file will be included in the generated language-specific task configs.
 # It doesn't have a yaml file extension as it is not meant to be imported directly
 # by the harness.
-group: mgsm +group: mgsm_direct dataset_path: juletxara/mgsm dataset_name: null # Overridden by language-specific config. output_type: greedy_until diff --git a/lm_eval/tasks/mgsm/mgsm_bn_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_bn_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_de_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_de_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml index 97b6a92f..a523c0ef 100644 --- a/lm_eval/tasks/mgsm/mgsm_de_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[7+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAntwort"}}{% else %}{{"Frage: "+question+"\nAntwort"}}{% endif %}' include: direct_yaml -task: mgsm_de_direct +task: mgsm_direct_de diff --git a/lm_eval/tasks/mgsm/mgsm_en_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_en_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml index f0ff6c9d..0570267f 100644 --- a/lm_eval/tasks/mgsm/mgsm_en_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Question: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_en_direct +task: mgsm_direct_en diff --git a/lm_eval/tasks/mgsm/mgsm_es_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_es_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml index 5430016e..9d2f1fa3 100644 --- a/lm_eval/tasks/mgsm/mgsm_es_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Pregunta: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_es_direct +task: mgsm_direct_es diff --git a/lm_eval/tasks/mgsm/mgsm_fr_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_fr_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml index b5e0f924..b39d97ce 100644 --- a/lm_eval/tasks/mgsm/mgsm_fr_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Question : "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_fr_direct +task: mgsm_direct_fr diff --git a/lm_eval/tasks/mgsm/mgsm_ja_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_ja_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml index ef9d862c..8f81ff43 100644 --- a/lm_eval/tasks/mgsm/mgsm_ja_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"問題: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: 
mgsm_ja_direct +task: mgsm_direct_ja diff --git a/lm_eval/tasks/mgsm/mgsm_ru_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_ru_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml index fe581069..01e7987f 100644 --- a/lm_eval/tasks/mgsm/mgsm_ru_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Задача: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_ru_direct +task: mgsm_direct_ru diff --git a/lm_eval/tasks/mgsm/mgsm_sw_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_sw_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml index d19052e3..7f04a7c4 100644 --- a/lm_eval/tasks/mgsm/mgsm_sw_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"Swali: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_sw_direct +task: mgsm_direct_sw diff --git a/lm_eval/tasks/mgsm/mgsm_te_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_te_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml index 0b8250ca..ce0a1461 100644 --- a/lm_eval/tasks/mgsm/mgsm_te_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"ప్రశ్న: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_te_direct +task: mgsm_direct_te diff --git a/lm_eval/tasks/mgsm/mgsm_th_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_th_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml index 275d1c4d..1db7cd33 100644 --- a/lm_eval/tasks/mgsm/mgsm_th_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"โจทย์: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_th_direct +task: mgsm_direct_th diff --git a/lm_eval/tasks/mgsm/mgsm_zh_direct.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml similarity index 93% rename from lm_eval/tasks/mgsm/mgsm_zh_direct.yaml rename to lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml index 0b1ab79f..1b873be4 100644 --- a/lm_eval/tasks/mgsm/mgsm_zh_direct.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"问题: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_zh_direct +task: mgsm_direct_zh diff --git a/lm_eval/tasks/mgsm/cot_yaml b/lm_eval/tasks/mgsm/native_cot/cot_yaml similarity index 96% rename from lm_eval/tasks/mgsm/cot_yaml rename to lm_eval/tasks/mgsm/native_cot/cot_yaml index a1def277..08182e0d 100644 --- a/lm_eval/tasks/mgsm/cot_yaml +++ b/lm_eval/tasks/mgsm/native_cot/cot_yaml @@ -1,7 +1,7 @@ # 
This file will be included in the generated language-specific task configs. # It doesn't have a yaml file extension as it is not meant to be imported directly # by the harness. -group: mgsm +group: mgsm_cot_native dataset_path: juletxara/mgsm dataset_name: null # Overridden by language-specific config. output_type: greedy_until diff --git a/lm_eval/tasks/mgsm/mgsm_bn_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_bn_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_de_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_de_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_en_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_en_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_es_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_es_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_fr_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_fr_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_ja_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_ja_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_ru_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_ru_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_sw_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_sw_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_te_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_te_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_th_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_th_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml diff --git a/lm_eval/tasks/mgsm/mgsm_zh_native-cot.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml similarity index 100% rename from lm_eval/tasks/mgsm/mgsm_zh_native-cot.yaml rename to lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml -- GitLab From 681b2e2c76b57f546c50b522f5613f8dec1755be Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 6 Sep 2023 15:42:02 +0000 Subject: [PATCH 126/264] change name --- lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml b/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml index 87622a3a..7c43724e 100644 --- 
a/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml +++ b/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[6+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\nAnswer"}}{% else %}{{"প্রশ্ন: "+question+"\nAnswer"}}{% endif %}' include: direct_yaml -task: mgsm_bn_direct +task: mgsm_direct_bn -- GitLab From 78522c9448fd39ae42d10d79fcf7ca89e266633b Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 6 Sep 2023 15:42:35 +0000 Subject: [PATCH 127/264] edit readme --- lm_eval/tasks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index eec57638..8e5393b9 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -56,7 +56,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [x] XWinograd - [x] PAWS-X - [x] XNLI -- [ ] MGSM (Lintang) +- [x] MGSM - [ ] SCROLLS - [x] Babi -- GitLab From 57c29709def352afa0ecc599dfd8756010c19dbc Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 09:44:12 +0000 Subject: [PATCH 128/264] adjust --- lm_eval/tasks/mgsm/native_cot/cot_yaml | 6 ++++++ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml | 6 ------ lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml | 6 ------ 12 files changed, 6 insertions(+), 66 deletions(-) diff --git a/lm_eval/tasks/mgsm/native_cot/cot_yaml b/lm_eval/tasks/mgsm/native_cot/cot_yaml index 08182e0d..f5cf60d9 100644 --- a/lm_eval/tasks/mgsm/native_cot/cot_yaml +++ b/lm_eval/tasks/mgsm/native_cot/cot_yaml @@ -21,3 +21,9 @@ metric_list: higher_is_better: true ignore_case: true ignore_punctuation: true +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml index ce050085..c4918f5c 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[16+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nধাপে ধাপে উত্তর:"}}{% else %}{{"প্রশ্ন: "+question+"\nধাপে ধাপে উত্তর:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_bn_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml index ff8d789b..e84bcb45 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[28+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is 
not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_de_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml index 9174cc90..7d39dcc0 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_en_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml index ed223378..0bd3b688 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[22+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta paso a paso:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta paso a paso:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_es_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml index 4aa4a252..500cb05a 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[25+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nRéponse étape par étape :"}}{% else %}{{"Question : "+question+"\nRéponse étape par étape :"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_fr_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml index 282c1f4c..05830b22 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[10+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: "+question+"\nステップごとの答え:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_ja_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml index b5fd4128..0cbd994c 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none 
%}{{question+"\nПошаговоерешение:"}}{% else %}{{"Задача: "+question+"\nПошаговоерешение:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_ru_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml index 63655f65..4b47b2fe 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[24+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_sw_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml index 08bb1dd3..4c3b76a0 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[18+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_te_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml index d9b8c9da..5311bcf8 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_nu endif %}' doc_to_text: '{% if answer is not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_th_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml index 65678633..db0d83b7 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml @@ -4,11 +4,5 @@ doc_to_target: '{% if answer is not none %}{{answer[5+1]}}{% else %}{{answer_num endif %}' doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer include: cot_yaml task: mgsm_zh_direct -- GitLab From af4b012e82414a65885ff22e17219489d86cf397 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 09:44:36 +0000 Subject: [PATCH 129/264] add mutual --- lm_eval/tasks/mutual/README.md | 48 ++++++++++++++++++++++++++ lm_eval/tasks/mutual/multual_plus.yaml | 3 ++ lm_eval/tasks/mutual/mutual.yaml | 23 ++++++++++++ lm_eval/tasks/mutual/utils.py | 36 +++++++++++++++++++ 4 files changed, 110 insertions(+) create mode 100644 lm_eval/tasks/mutual/README.md create mode 100644 
lm_eval/tasks/mutual/multual_plus.yaml create mode 100644 lm_eval/tasks/mutual/mutual.yaml create mode 100644 lm_eval/tasks/mutual/utils.py diff --git a/lm_eval/tasks/mutual/README.md b/lm_eval/tasks/mutual/README.md new file mode 100644 index 00000000..966fb840 --- /dev/null +++ b/lm_eval/tasks/mutual/README.md @@ -0,0 +1,48 @@ +# MuTual + +### Paper + +Title: `MuTual: A Dataset for Multi-Turn Dialogue Reasoning` + +Abstract: https://www.aclweb.org/anthology/2020.acl-main.130/ + +MuTual is a retrieval-based dataset for multi-turn dialogue reasoning, which is +modified from Chinese high school English listening comprehension test data. + +Homepage: https://github.com/Nealcly/MuTual + +### Citation + +``` +@inproceedings{mutual, + title = "MuTual: A Dataset for Multi-Turn Dialogue Reasoning", + author = "Cui, Leyang and Wu, Yu and Liu, Shujie and Zhang, Yue and Zhou, Ming" , + booktitle = "Proceedings of the 58th Conference of the Association for Computational Linguistics", + year = "2020", + publisher = "Association for Computational Linguistics", +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `mutual` +* `mutual_plus` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
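The `process_results` added in `utils.py` below converts the per-choice loglikelihoods of the four candidate responses into ranking metrics. A minimal standalone sketch of that computation follows; the scores, gold index, and helper name here are illustrative, not part of the patch:

```
import numpy as np

def ranking_metrics(scores, gold):
    # scores: loglikelihood of each candidate response; gold: index of the correct one
    ranks = sorted(scores, reverse=True)
    gold_rank = ranks.index(scores[gold])         # 0-based rank of the gold response
    return {
        "r@1": float(np.argmax(scores) == gold),  # gold response scored highest
        "r@2": float(gold_rank <= 1),             # gold response within the top two
        "mrr": 1.0 / (gold_rank + 1),             # reciprocal rank of the gold response
    }

print(ranking_metrics([-4.2, -1.3, -2.8, -5.0], gold=2))
# -> {'r@1': 0.0, 'r@2': 1.0, 'mrr': 0.5}
```

Each metric is computed per document and then averaged over the evaluation set, matching the `aggregation: mean` entries in the metric list of `mutual.yaml`.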
diff --git a/lm_eval/tasks/mutual/multual_plus.yaml b/lm_eval/tasks/mutual/multual_plus.yaml new file mode 100644 index 00000000..5c53ef6b --- /dev/null +++ b/lm_eval/tasks/mutual/multual_plus.yaml @@ -0,0 +1,3 @@ +include: mutual.yaml +task: mutual_plus +dataset_name: mutual_plus diff --git a/lm_eval/tasks/mutual/mutual.yaml b/lm_eval/tasks/mutual/mutual.yaml new file mode 100644 index 00000000..83743813 --- /dev/null +++ b/lm_eval/tasks/mutual/mutual.yaml @@ -0,0 +1,23 @@ +task: mutual +dataset_path: "EleutherAI/mutual" +dataset_name: mutual +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{article}}" +doc_to_target: "{{['A', 'B', 'C', 'D'].index(answers)}}" +doc_to_choice: "{{options}}" +process_docs: !function utils.process_docs +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{article}}" +metric_list: + - metric: r@1 + aggregation: mean + higher_is_better: true + - metric: r@2 + aggregation: mean + higher_is_better: true + - metric: mrr + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/mutual/utils.py b/lm_eval/tasks/mutual/utils.py new file mode 100644 index 00000000..46d3ce4b --- /dev/null +++ b/lm_eval/tasks/mutual/utils.py @@ -0,0 +1,36 @@ +import numpy as np + +def process_docs(dataset): + + def _detokenize(text): + text = text.replace(" '", "'") + text = text.replace(" \n", "\n") + text = text.replace("\n ", "\n") + text = text.replace(" n't", "n't") + text = text.replace("`` ", '"') + text = text.replace("''", '"') + # punctuation + text = text.replace(" :", ":") + text = text.replace(" ;", ";") + text = text.replace(" !", "!") + text = text.replace(" ?", "?") + text = text.replace(" ,", ",") + text = text.replace(" .", ".") + return text + + def _process(doc): + return { + "article": _detokenize(doc["article"]), + "options": [_detokenize(option) for option in doc["options"]], + } + + return dataset.map(_process) + + +def process_results(doc, results): + gold = ["A", "B", "C", "D"].index(doc["answers"]) + r4_1 = np.argmax(results) == gold # r4_1 = accuracy + ranks = sorted(results, reverse=True) + r4_2 = (ranks.index(results[gold]) == 1) + r4_1 + mrr = 1.0 / (ranks.index(results[gold]) + 1) # `+ 1` for index offset + return {"r@1": r4_1, "r@2": r4_2, "mrr": mrr} -- GitLab From 2d5306648370be0c96aa41ada03e486dcd6fa3ac Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 09:44:53 +0000 Subject: [PATCH 130/264] add qasper --- lm_eval/tasks/qasper/README.md | 63 ++++++++++++++++++++++++++ lm_eval/tasks/qasper/bool.yaml | 12 +++++ lm_eval/tasks/qasper/freeform.yaml | 16 +++++++ lm_eval/tasks/qasper/metrics.py | 35 ++++++++++++++ lm_eval/tasks/qasper/utils.py | 73 ++++++++++++++++++++++++++++++ 5 files changed, 199 insertions(+) create mode 100644 lm_eval/tasks/qasper/README.md create mode 100644 lm_eval/tasks/qasper/bool.yaml create mode 100644 lm_eval/tasks/qasper/freeform.yaml create mode 100644 lm_eval/tasks/qasper/metrics.py create mode 100644 lm_eval/tasks/qasper/utils.py diff --git a/lm_eval/tasks/qasper/README.md b/lm_eval/tasks/qasper/README.md new file mode 100644 index 00000000..0964f87f --- /dev/null +++ b/lm_eval/tasks/qasper/README.md @@ -0,0 +1,63 @@ +# QASPER + +### Paper + +Title: `A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers` + +Abstract: https://arxiv.org/abs/2105.03011 + +QASPER is a dataset of 5,049 questions over 1,585 Natural Language Processing papers. 
+Each question is written by an NLP practitioner who read only the title and abstract +of the corresponding paper, and the question seeks information present in the full +text. The questions are then answered by a separate set of NLP practitioners who also +provide supporting evidence to answers. + +Homepage: https://allenai.org/data/qasper + +### Citation + +``` +@article{DBLP:journals/corr/abs-2105-03011, + author = {Pradeep Dasigi and + Kyle Lo and + Iz Beltagy and + Arman Cohan and + Noah A. Smith and + Matt Gardner}, + title = {A Dataset of Information-Seeking Questions and Answers Anchored in + Research Papers}, + journal = {CoRR}, + volume = {abs/2105.03011}, + year = {2021}, + url = {https://arxiv.org/abs/2105.03011}, + eprinttype = {arXiv}, + eprint = {2105.03011}, + timestamp = {Fri, 14 May 2021 12:13:30 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +### Groups and Tasks + +#### Groups + +* `qasper`: `Short description` + +#### Tasks + +* `qasper_bool`: `1-sentence description of what this particular task does` +* `qasper_freeform`: ... + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/qasper/bool.yaml b/lm_eval/tasks/qasper/bool.yaml new file mode 100644 index 00000000..bcb4e432 --- /dev/null +++ b/lm_eval/tasks/qasper/bool.yaml @@ -0,0 +1,12 @@ +group: qasper +task: qasper_bool +dataset_path: qasper +output_type: multiple_choice +training_split: train +validation_split: validation +process_docs: !function utils.process_docs_bool +doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:" +doc_to_target: 1 +doc_to_choice: ["no", "yes"] +metric_list: + - metric: f1 diff --git a/lm_eval/tasks/qasper/freeform.yaml b/lm_eval/tasks/qasper/freeform.yaml new file mode 100644 index 00000000..03324c3b --- /dev/null +++ b/lm_eval/tasks/qasper/freeform.yaml @@ -0,0 +1,16 @@ +group: qasper +task: qasper_freeform +dataset_path: qasper +output_type: greedy_until +training_split: train +validation_split: validation +process_docs: !function utils.process_docs_freeform +doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:" +doc_to_target: answer +generation_kwargs: + until: + - "\n" +metric_list: + - metric: !function metrics.f1_abstractive + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/qasper/metrics.py b/lm_eval/tasks/qasper/metrics.py new file mode 100644 index 00000000..35ac489c --- /dev/null +++ b/lm_eval/tasks/qasper/metrics.py @@ -0,0 +1,35 @@ +def normalize_answer(s): + """ + Taken from the official evaluation script for v1.1 of the SQuAD dataset. + Lower text and remove punctuation, articles and extra whitespace. 
+ """ + + def remove_articles(text): + return re.sub(r"\b(a|an|the)\b", " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + +def f1_abstractive(prediction, ground_truth): + """ + Taken from the official evaluation script for v1.1 of the SQuAD dataset. + """ + prediction_tokens = normalize_answer(prediction).split() + ground_truth_tokens = normalize_answer(ground_truth).split() + common = Counter(prediction_tokens) & Counter(ground_truth_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(ground_truth_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 \ No newline at end of file diff --git a/lm_eval/tasks/qasper/utils.py b/lm_eval/tasks/qasper/utils.py new file mode 100644 index 00000000..f9d3f231 --- /dev/null +++ b/lm_eval/tasks/qasper/utils.py @@ -0,0 +1,73 @@ +from datasets import Dataset +from functools import partial + +def process_docs(dataset, set_answer_type="bool"): + + FEATURES = [ + "title", + "abstract", + "question", + "answer", + "answer_type" + ] + + def _categorise_answer(answer_blob): + if answer_blob["unanswerable"]: + answer = "unanswerable" + answer_type = "unanswerable" + return answer, answer_type + elif answer_blob["yes_no"]: + answer = "yes" + answer_type = "bool" + return answer, answer_type + elif answer_blob["free_form_answer"]: + answer = answer_blob["free_form_answer"] + answer_type = "free form answer" + return answer, answer_type + elif answer_blob["extractive_spans"]: + answer = answer_blob["extractive_spans"] + answer_type = "extractive_spans" + return answer, answer_type + elif answer_blob["yes_no"] is False: + answer = "no" + answer_type = "bool" + return answer, answer_type + + def _flatten(doc): + """Given a `doc`, flatten it out so that each JSON blob + contains exactly one question and one answer. 
Logic taken from + the reference implementation available at + https://github.com/allenai/qasper-led-baseline/blob/main/scripts/evaluator.py + """ + obs_list = { + "title": [], + "abstract": [], + "question": [], + "answer": [], + "answer_type": [], + } + title = doc.pop("title") + abstract = doc.pop("abstract") + for question, answer_list in zip(doc["qas"]["question"], doc["qas"]["answers"]): + for answer_blob in answer_list["answer"]: + answer, answer_type = _categorise_answer(answer_blob) + if answer_type == set_answer_type: + obs_list["title"].append(title) + obs_list["abstract"].append(abstract) + obs_list["question"].append(question) + obs_list["answer_type"].append(answer_type) + if type(answer) == list: + answer = ", ".join(answer) + obs_list["answer"].append(answer) + + return obs_list + + dataset = dataset.map(_flatten, remove_columns=[key for key in dataset.features.keys() if key not in FEATURES]) + new_dataset = {} + for key in dataset.features.keys(): + new_dataset[key] = [x for row in dataset[key] for x in row] + + return Dataset.from_dict(new_dataset) + +process_docs_bool = partial(process_docs, set_answer_type="bool") +process_docs_freeform = partial(process_docs, set_answer_type="freeform") -- GitLab From 46bd5e46de42e7f758974f81541e7bf449f5d0f7 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 09:59:04 +0000 Subject: [PATCH 131/264] update --- lm_eval/tasks/qasper/README.md | 6 +++--- lm_eval/tasks/qasper/utils.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/qasper/README.md b/lm_eval/tasks/qasper/README.md index 0964f87f..647d54ea 100644 --- a/lm_eval/tasks/qasper/README.md +++ b/lm_eval/tasks/qasper/README.md @@ -42,12 +42,12 @@ Homepage: https://allenai.org/data/qasper #### Groups -* `qasper`: `Short description` +* `qasper`: executes both `qasper_bool` and `qasper_freeform` #### Tasks -* `qasper_bool`: `1-sentence description of what this particular task does` -* `qasper_freeform`: ... 
+* `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"` +* `qasper_freeform`: Greedy generation task that evaluates the samples from the task with `answer_type="free form answer"` ### Checklist diff --git a/lm_eval/tasks/qasper/utils.py b/lm_eval/tasks/qasper/utils.py index f9d3f231..537a2ab2 100644 --- a/lm_eval/tasks/qasper/utils.py +++ b/lm_eval/tasks/qasper/utils.py @@ -70,4 +70,4 @@ def process_docs(dataset, set_answer_type="bool"): return Dataset.from_dict(new_dataset) process_docs_bool = partial(process_docs, set_answer_type="bool") -process_docs_freeform = partial(process_docs, set_answer_type="freeform") +process_docs_freeform = partial(process_docs, set_answer_type="free form answer") -- GitLab From e0e0746d22f4c7b939393ad9954a831f2b1dd2d0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 09:59:26 +0000 Subject: [PATCH 132/264] checked mutual and qasper --- lm_eval/tasks/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/README.md b/lm_eval/tasks/README.md index e30d3f95..b95012b5 100644 --- a/lm_eval/tasks/README.md +++ b/lm_eval/tasks/README.md @@ -16,7 +16,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [x] MCTACO - [x] Pubmed QA - [x] SciQ -- [ ] QASPER +- [x] QASPER - [x] QA4MRE - [x] TriviaQA - [x] AI2 ARC @@ -36,7 +36,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for - [x] TruthfulQA (mc1) - [x] TruthfulQA (mc2) - [x] TruthfulQA (gen) -- [ ] MuTual +- [x] MuTual - [ ] Hendrycks Math (Hailey) - [x] Asdiv - [ ] GSM8k -- GitLab From d3d1b7f1c46a6d173dd484a1dda7bf9c4703f894 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 13:19:13 +0000 Subject: [PATCH 133/264] fixed import --- lm_eval/tasks/qasper/metrics.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lm_eval/tasks/qasper/metrics.py b/lm_eval/tasks/qasper/metrics.py index 35ac489c..d4c08670 100644 --- a/lm_eval/tasks/qasper/metrics.py +++ b/lm_eval/tasks/qasper/metrics.py @@ -1,3 +1,5 @@ +import string + def normalize_answer(s): """ Taken from the official evaluation script for v1.1 of the SQuAD dataset. @@ -19,17 +21,17 @@ def normalize_answer(s): return white_space_fix(remove_articles(remove_punc(lower(s)))) -def f1_abstractive(prediction, ground_truth): +def f1_abstractive(predictions, references): """ Taken from the official evaluation script for v1.1 of the SQuAD dataset. 
""" - prediction_tokens = normalize_answer(prediction).split() - ground_truth_tokens = normalize_answer(ground_truth).split() - common = Counter(prediction_tokens) & Counter(ground_truth_tokens) + prediction_tokens = normalize_answer(predictions[0]).split() + references_tokens = normalize_answer(references[0]).split() + common = Counter(prediction_tokens) & Counter(references_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) - recall = 1.0 * num_same / len(ground_truth_tokens) + recall = 1.0 * num_same / len(references_tokens) f1 = (2 * precision * recall) / (precision + recall) - return f1 \ No newline at end of file + return f1 -- GitLab From 4b711aa661d2cd2e15f8495e4133b971dd195cc2 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 13:19:34 +0000 Subject: [PATCH 134/264] normalize path --- lm_eval/utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index c63e8daf..86b19c70 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -395,8 +395,10 @@ def import_function(loader, node): function_name = loader.construct_scalar(node) yaml_path = os.path.dirname(loader.name) - module_name, function_name = function_name.split(".") - module_path = os.path.join(yaml_path, "{}.py".format(module_name)) + *module_name, function_name = function_name.split(".") + if type(module_name) == list: + module_name = ".".join(module_name) + module_path = os.path.normpath(os.path.join(yaml_path, "{}.py".format(module_name))) spec = importlib.util.spec_from_file_location(module_name, module_path) module = importlib.util.module_from_spec(spec) -- GitLab From d1f502709b9720f11222d09c04b7c86758115e75 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 7 Sep 2023 13:35:45 +0000 Subject: [PATCH 135/264] add scrolls --- lm_eval/tasks/scrolls/README.md | 84 +++++++++++++++++++ .../tasks/scrolls/contractnli/default.yaml | 9 ++ lm_eval/tasks/scrolls/govreport/default.yaml | 4 + .../tasks/scrolls/narrativeqa/default.yaml | 15 ++++ lm_eval/tasks/scrolls/preprocessors.py | 77 +++++++++++++++++ lm_eval/tasks/scrolls/pruned_tasks.py | 4 + lm_eval/tasks/scrolls/qasper/default.yaml | 15 ++++ lm_eval/tasks/scrolls/qasper/utils.py | 25 ++++++ lm_eval/tasks/scrolls/qmsum/default.yaml | 5 ++ lm_eval/tasks/scrolls/quality/default.yaml | 7 ++ lm_eval/tasks/scrolls/quality/utils.py | 27 ++++++ .../scrolls/scroll_multiplechoice_task_yaml | 11 +++ .../tasks/scrolls/scroll_summary_task_yaml | 15 ++++ .../tasks/scrolls/summscreenfd/default.yaml | 4 + 14 files changed, 302 insertions(+) create mode 100644 lm_eval/tasks/scrolls/README.md create mode 100644 lm_eval/tasks/scrolls/contractnli/default.yaml create mode 100644 lm_eval/tasks/scrolls/govreport/default.yaml create mode 100644 lm_eval/tasks/scrolls/narrativeqa/default.yaml create mode 100644 lm_eval/tasks/scrolls/preprocessors.py create mode 100644 lm_eval/tasks/scrolls/pruned_tasks.py create mode 100644 lm_eval/tasks/scrolls/qasper/default.yaml create mode 100644 lm_eval/tasks/scrolls/qasper/utils.py create mode 100644 lm_eval/tasks/scrolls/qmsum/default.yaml create mode 100644 lm_eval/tasks/scrolls/quality/default.yaml create mode 100644 lm_eval/tasks/scrolls/quality/utils.py create mode 100644 lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml create mode 100644 lm_eval/tasks/scrolls/scroll_summary_task_yaml create mode 100644 lm_eval/tasks/scrolls/summscreenfd/default.yaml diff --git a/lm_eval/tasks/scrolls/README.md 
b/lm_eval/tasks/scrolls/README.md
new file mode 100644
index 00000000..193a1179
--- /dev/null
+++ b/lm_eval/tasks/scrolls/README.md
@@ -0,0 +1,84 @@
+# SCROLLS
+
+### Paper
+
+Title: `SCROLLS: Standardized CompaRison Over Long Language Sequences`
+
+Abstract: https://arxiv.org/abs/2201.03533
+
+SCROLLS is a suite of datasets that require synthesizing information over long texts.
+The benchmark includes seven natural language tasks across multiple domains,
+including summarization, question answering, and natural language inference.
+
+Homepage: https://www.scrolls-benchmark.com/
+
+Since SCROLLS tasks are generally longer than the maximum sequence length of many models,
+it is possible to create "subset" tasks that contain only those samples whose tokenized length
+is less than some pre-defined limit. For example, to create a subset of "Qasper" that would
+be suitable for a model using the GPTNeoX tokenizer and a 4K maximum sequence length:
+
+```
+class QasperGPTNeoX4K(Qasper):
+    PRUNE_TOKENIZERS = ["EleutherAI/pythia-410m-deduped"]
+    PRUNE_MAX_TOKENS = 4096
+    PRUNE_NUM_PROC = _num_cpu_cores() # optional, to speed up pruning of large datasets like NarrativeQA
+```
+
+`PRUNE_TOKENIZERS` can contain more than one tokenizer; this will include only samples that are
+less than `PRUNE_MAX_TOKENS` for ALL of the tokenizers. This can be useful for comparing models
+that use different tokenizers but the same maximum sequence length.
+
+Once the subset task class has been defined, it can be used by adding the class
+to `lm_eval/tasks/__init__.py`.
+
+NOTE: GovReport may need `max_gen_toks` set larger for causal models.
+
+### Citation
+
+```
+@inproceedings{shaham-etal-2022-scrolls,
+    title = "{SCROLLS}: Standardized {C}ompa{R}ison Over Long Language Sequences",
+    author = "Shaham, Uri  and
+      Segal, Elad  and
+      Ivgi, Maor  and
+      Efrat, Avia  and
+      Yoran, Ori  and
+      Haviv, Adi  and
+      Gupta, Ankit  and
+      Xiong, Wenhan  and
+      Geva, Mor  and
+      Berant, Jonathan  and
+      Levy, Omer",
+    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
+    month = dec,
+    year = "2022",
+    address = "Abu Dhabi, United Arab Emirates",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2022.emnlp-main.823",
+    pages = "12007--12021"
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `scrolls`: runs all of the SCROLLS subtasks listed below
+
+#### Tasks
+
+* `scrolls_qasper`, `scrolls_narrativeqa`, `scrolls_quality`: question answering over NLP papers, book/movie scripts, and long articles, respectively (QuALITY is multiple choice)
+* `scrolls_contractnli`, `scrolls_govreport`, `scrolls_summscreenfd`, `scrolls_qmsum`: natural language inference over contracts, plus summarization of government reports, TV transcripts, and meetings
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
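The subset pruning described in the README above is, at its core, a tokenize-and-filter pass over the dataset. A rough standalone sketch of that idea, assuming a Hugging Face `datasets.Dataset` whose raw text lives in an `input` column as in `tau/scrolls` (the function name is illustrative, not the harness's actual API):

```python
from datasets import Dataset
from transformers import AutoTokenizer

def prune_dataset(dataset: Dataset, tokenizer_names: list, max_tokens: int) -> Dataset:
    # Keep only samples that fit within max_tokens for ALL of the tokenizers,
    # mirroring the PRUNE_TOKENIZERS / PRUNE_MAX_TOKENS semantics above.
    tokenizers = [AutoTokenizer.from_pretrained(name) for name in tokenizer_names]

    def _fits(sample) -> bool:
        text = sample["input"]  # assumed column holding the raw document text
        return all(len(tok(text).input_ids) <= max_tokens for tok in tokenizers)

    return dataset.filter(_fits)

# e.g.: pruned = prune_dataset(ds, ["EleutherAI/pythia-410m-deduped"], 4096)
```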
diff --git a/lm_eval/tasks/scrolls/contractnli/default.yaml b/lm_eval/tasks/scrolls/contractnli/default.yaml new file mode 100644 index 00000000..fc446dd5 --- /dev/null +++ b/lm_eval/tasks/scrolls/contractnli/default.yaml @@ -0,0 +1,9 @@ +include: ../scroll_multiplechoice_task_yaml +task: scrolls_contractnli +dataset_name: contract_nli +process_docs: !function ../preprocessors.process_docs_prepended_question +doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:" +doc_to_target: "{{outputs[0]}}" +doc_to_choice: ["Not mentioned", "Entailment", "Contradiction"] +should_decontaminate: true +doc_to_decontamination_query: input diff --git a/lm_eval/tasks/scrolls/govreport/default.yaml b/lm_eval/tasks/scrolls/govreport/default.yaml new file mode 100644 index 00000000..80e0246b --- /dev/null +++ b/lm_eval/tasks/scrolls/govreport/default.yaml @@ -0,0 +1,4 @@ +include: ../scroll_summary_task_yaml +task: scrolls_govreport +dataset_path: tau/scrolls +dataset_name: gov_report diff --git a/lm_eval/tasks/scrolls/narrativeqa/default.yaml b/lm_eval/tasks/scrolls/narrativeqa/default.yaml new file mode 100644 index 00000000..e429db3e --- /dev/null +++ b/lm_eval/tasks/scrolls/narrativeqa/default.yaml @@ -0,0 +1,15 @@ +include: ../scroll_multiplechoice_task_yaml +task: scrolls_narrativeqa +dataset_name: narrative_qa +output_type: greedy_until +process_docs: !function ../preprocessors.process_docs_prepended_question +doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:" +doc_to_target: "{{outputs[0]}}" +doc_to_choice: ["Not mentioned", "Entailment", "Contradiction"] +should_decontaminate: true +doc_to_decontamination_query: input +metric_list: + - metric: f1 +generation_kwargs: + until: + - "\n" diff --git a/lm_eval/tasks/scrolls/preprocessors.py b/lm_eval/tasks/scrolls/preprocessors.py new file mode 100644 index 00000000..4f5df832 --- /dev/null +++ b/lm_eval/tasks/scrolls/preprocessors.py @@ -0,0 +1,77 @@ +from functools import partial + +from transformers import AutoTokenizer + +def _num_cpu_cores(): + # https://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-using-python/55423170#55423170 + try: + import psutil + return psutil.cpu_count(logical=False) + except ImportError: + import os + return len(os.sched_getaffinity(0)) + +def process_docs(dataset, custom_process=None, PRUNE_TOKENIZERS=[], PRUNE_MAX_TOKENS=4096, PRUNE_NUM_PROC=_num_cpu_cores()): + + def _drop_duplicates_in_input(untokenized_dataset): + # from scrolls/evaluator/dataset_evaluator.py + + indices_to_keep = [] + id_to_idx = {} + outputs = [] + for i, (id_, output) in enumerate(zip(untokenized_dataset["id"], untokenized_dataset["output"])): + if id_ in id_to_idx: + outputs[id_to_idx[id_]].append(output) + continue + indices_to_keep.append(i) + id_to_idx[id_] = len(outputs) + outputs.append([output]) + untokenized_dataset = untokenized_dataset.select(indices_to_keep).flatten_indices() + untokenized_dataset = untokenized_dataset.remove_columns("output") + untokenized_dataset = untokenized_dataset.add_column("outputs", outputs) + return untokenized_dataset + + dataset = _drop_duplicates_in_input(dataset) + if custom_process is not None: + dataset = dataset.map(custom_process) + + if len(PRUNE_TOKENIZERS) > 0: + tokenizers = [AutoTokenizer.from_pretrained(tokenizer) for tokenizer in PRUNE_TOKENIZERS] + cache = {} + + def _get_prune_text(doc): + return doc_to_text(doc) + + def _filter(sample): + text = _get_prune_text(sample) + cached = cache.get(text, None) + if cached is None: + for tokenizer in 
tokenizers:
+                    if len(tokenizer(text).input_ids) > PRUNE_MAX_TOKENS:
+                        cache[text] = False
+                        return False
+                cache[text] = True
+                return True
+            else:
+                return cached
+
+        dataset = dataset.filter(_filter, num_proc=PRUNE_NUM_PROC)
+
+    return dataset
+
+def _doc_prepended_question(doc):
+    # "When a query is given in addition to the raw text (as
+    # in QMSum, Qasper, NarrativeQA, QuALITY, and ContractNLI),
+    # we prepend it to the text, using two newlines as a natural separator"
+    input = doc["input"]
+    split = input.find("\n\n")
+    return {
+        "id": doc["id"],
+        "pid": doc["pid"],
+        "input": input,
+        "outputs": doc["outputs"],
+        "question": input[0:split],
+        "text": input[split + 2:]
+    }
+
+process_docs_prepended_question = partial(process_docs, custom_process=_doc_prepended_question)
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/pruned_tasks.py b/lm_eval/tasks/scrolls/pruned_tasks.py
new file mode 100644
index 00000000..b0a06377
--- /dev/null
+++ b/lm_eval/tasks/scrolls/pruned_tasks.py
@@ -0,0 +1,4 @@
+from functools import partial
+from preprocessors import process_docs, _doc_prepended_question
+
+process_docs_prepended_questionGPTNeoX4K = partial(process_docs, custom_process=_doc_prepended_question)
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/qasper/default.yaml b/lm_eval/tasks/scrolls/qasper/default.yaml
new file mode 100644
index 00000000..b76b09d4
--- /dev/null
+++ b/lm_eval/tasks/scrolls/qasper/default.yaml
@@ -0,0 +1,15 @@
+group: scrolls
+task: scrolls_qasper
+dataset_path: tau/scrolls
+dataset_name: qasper
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+process_docs: !function ../preprocessors.process_docs_prepended_question
+doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:"
+doc_to_target: "{{outputs[0]}}"
+doc_to_choice: ["yes", "no"]
+should_decontaminate: true
+doc_to_decontamination_query: input
+metric_list:
+  - metric: f1
diff --git a/lm_eval/tasks/scrolls/qasper/utils.py b/lm_eval/tasks/scrolls/qasper/utils.py
new file mode 100644
index 00000000..49963b6d
--- /dev/null
+++ b/lm_eval/tasks/scrolls/qasper/utils.py
@@ -0,0 +1,29 @@
+from functools import reduce
+
+import transformers.data.metrics.squad_metrics as squad_metrics
+
+from ..preprocessors import process_docs_prepended_question
+
+def process_docs(dataset):
+
+    dataset = process_docs_prepended_question(dataset)
+
+    def _process_doc(doc):
+
+        doc["is_yes_no"] = reduce(lambda prev, cur: prev and squad_metrics.normalize_answer(cur)
+                                  in ["yes", "no"], doc["outputs"], True)
+
+        return doc
+
+    return dataset.map(_process_doc)
+
+def process_results(doc, results):
+    if doc["is_yes_no"]:
+        prediction = " yes" if results[0] > results[1] else " no"
+    elif len(results[0].strip()) == 0:
+        prediction = "Unanswerable"
+    else:
+        prediction = results[0]
+    return {
+        "f1": (prediction, doc["outputs"])
+    }
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/qmsum/default.yaml b/lm_eval/tasks/scrolls/qmsum/default.yaml
new file mode 100644
index 00000000..a5caf807
--- /dev/null
+++ b/lm_eval/tasks/scrolls/qmsum/default.yaml
@@ -0,0 +1,5 @@
+include: ../scroll_summary_task_yaml
+task: scrolls_qmsum
+dataset_path: tau/scrolls
+dataset_name: qmsum
+process_docs: !function ../preprocessors.process_docs_prepended_question
diff --git a/lm_eval/tasks/scrolls/quality/default.yaml b/lm_eval/tasks/scrolls/quality/default.yaml
new file mode 100644
index 00000000..b05e1196
--- /dev/null
+++ b/lm_eval/tasks/scrolls/quality/default.yaml
@@ -0,0 +1,7 @@
+include: ../scroll_multiplechoice_task_yaml
+task: scrolls_quality
+dataset_name: quality
+process_docs:
!function utils.process_docs +doc_to_text: "{{text}}\n\nQuestion: {{question}}\nAnswer:" +doc_to_target: gold +doc_to_choice: "{{choices}}" diff --git a/lm_eval/tasks/scrolls/quality/utils.py b/lm_eval/tasks/scrolls/quality/utils.py new file mode 100644 index 00000000..a1aa87e4 --- /dev/null +++ b/lm_eval/tasks/scrolls/quality/utils.py @@ -0,0 +1,27 @@ +import re +from functools import partial + +from ..preprocessors import process_docs_prepended_question + +def process_docs(dataset): + + dataset = process_docs_prepended_question(dataset) + + _multiple_choice_pattern = re.compile(r" *\([A-D]\) *") + + def _normalize_answer(text): + return " ".join(text.split()).strip() + + def _process_doc(doc): + + split = doc["text"].find("\n\n", doc["text"].find("(D)")) + choices_text = doc["text"][:split] + + doc["text"] = doc["text"][split:].strip() + doc["choices"] = [_normalize_answer(choice) for choice in re.split( + _multiple_choice_pattern, choices_text)[1:]] + doc["gold"] = doc["choices"].index(_normalize_answer(doc["outputs"][0])) + + return doc + + return dataset.map(_process_doc) diff --git a/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml b/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml new file mode 100644 index 00000000..f91e06a5 --- /dev/null +++ b/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml @@ -0,0 +1,11 @@ +group: scrolls +dataset_path: tau/scrolls +output_type: multiple_choice +training_split: train +validation_split: validation +process_docs: !function ../preprocessors.process_docs +should_decontaminate: true +doc_to_decontamination_query: input +metric_list: + - metric: acc + - metric: acc_norm diff --git a/lm_eval/tasks/scrolls/scroll_summary_task_yaml b/lm_eval/tasks/scrolls/scroll_summary_task_yaml new file mode 100644 index 00000000..e12cdd24 --- /dev/null +++ b/lm_eval/tasks/scrolls/scroll_summary_task_yaml @@ -0,0 +1,15 @@ +group: scrolls +dataset_path: tau/scrolls +output_type: greedy_until +training_split: train +validation_split: validation +process_docs: !function ../preprocessors.process_docs +doc_to_text: "{{input}}\n\nQuestion: What is a summary of the preceding text?\nAnswer:" +doc_to_target: "{{outputs.join(", ")}}" +should_decontaminate: true +doc_to_decontamination_query: input +metric_list: + - metric: f1 +generation_kwargs: + until: + - "\n" diff --git a/lm_eval/tasks/scrolls/summscreenfd/default.yaml b/lm_eval/tasks/scrolls/summscreenfd/default.yaml new file mode 100644 index 00000000..542a3ba1 --- /dev/null +++ b/lm_eval/tasks/scrolls/summscreenfd/default.yaml @@ -0,0 +1,4 @@ +include: ../scroll_summary_task_yaml +task: scrolls_govreport +dataset_path: tau/scrolls +dataset_name: summ_screen_fd -- GitLab From 7a46245317ecf99e8085943e0908862b377e97f9 Mon Sep 17 00:00:00 2001 From: Hojjat Mokhtarabadi Date: Sun, 10 Sep 2023 15:45:23 +0330 Subject: [PATCH 136/264] undefined type and output_type when using promptsource fixed --- lm_eval/tasks/__init__.py | 3 +-- scripts/write_out.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 6828b233..53fae225 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -60,8 +60,7 @@ def check_prompt_config(config: dict[str, str]) -> List[dict[str, str]]: prompt_variation, ] ) - }, - **{"output_type": "greedy_until"}, + } } ) else: diff --git a/scripts/write_out.py b/scripts/write_out.py index 018c063c..e7288f2d 100644 --- a/scripts/write_out.py +++ b/scripts/write_out.py @@ -48,7 +48,7 @@ def main(): docs = 
join_iters(iters) - with open(os.path.join(args.output_base_path, task_name), "w") as f: + with open(os.path.join(args.output_base_path, task_name), "w", encoding='utf8') as f: for i, doc in ( zip(range(args.num_examples), docs) if args.num_examples > 0 -- GitLab From c87703f314d0b411344fafa26653cca482c46316 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 11 Sep 2023 08:19:10 +0000 Subject: [PATCH 137/264] update --- lm_eval/tasks/scrolls/narrativeqa/default.yaml | 7 ++++--- lm_eval/tasks/scrolls/qasper/default.yaml | 15 --------------- lm_eval/tasks/scrolls/quality/utils.py | 4 +++- .../tasks/scrolls/scroll_multiplechoice_task_yaml | 2 +- lm_eval/tasks/scrolls/scroll_summary_task_yaml | 14 +++++++++++--- lm_eval/tasks/scrolls/summscreenfd/default.yaml | 2 +- lm_eval/utils.py | 3 +-- 7 files changed, 21 insertions(+), 26 deletions(-) delete mode 100644 lm_eval/tasks/scrolls/qasper/default.yaml diff --git a/lm_eval/tasks/scrolls/narrativeqa/default.yaml b/lm_eval/tasks/scrolls/narrativeqa/default.yaml index e429db3e..db172672 100644 --- a/lm_eval/tasks/scrolls/narrativeqa/default.yaml +++ b/lm_eval/tasks/scrolls/narrativeqa/default.yaml @@ -3,13 +3,14 @@ task: scrolls_narrativeqa dataset_name: narrative_qa output_type: greedy_until process_docs: !function ../preprocessors.process_docs_prepended_question -doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:" -doc_to_target: "{{outputs[0]}}" -doc_to_choice: ["Not mentioned", "Entailment", "Contradiction"] +doc_to_text: "{{text}}\n\nQuestion: {{question}}\nAnswer:" +doc_to_target: "{{outputs| join(', ')}}" should_decontaminate: true doc_to_decontamination_query: input metric_list: - metric: f1 + aggregation: !function ../metrics.agg_f1 + higher_is_better: true generation_kwargs: until: - "\n" diff --git a/lm_eval/tasks/scrolls/qasper/default.yaml b/lm_eval/tasks/scrolls/qasper/default.yaml deleted file mode 100644 index b76b09d4..00000000 --- a/lm_eval/tasks/scrolls/qasper/default.yaml +++ /dev/null @@ -1,15 +0,0 @@ -group: scrolls -task: scrolls_qasper -dataset_path: tau/scrolls -dataset_name: qasper -output_type: multiple_choice -training_split: train -validation_split: validation -process_docs: !function ../preprocessors.process_docs_prepended_question -doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:" -doc_to_target: "{{outputs[0]}}" -doc_to_choice: ["yes", "no"] -should_decontaminate: true -doc_to_decontamination_query: input -metric_list: - - metric: f1 diff --git a/lm_eval/tasks/scrolls/quality/utils.py b/lm_eval/tasks/scrolls/quality/utils.py index a1aa87e4..d2d7eea5 100644 --- a/lm_eval/tasks/scrolls/quality/utils.py +++ b/lm_eval/tasks/scrolls/quality/utils.py @@ -1,7 +1,9 @@ import re from functools import partial -from ..preprocessors import process_docs_prepended_question +import sys +sys.path.append('..') +from preprocessors import process_docs_prepended_question def process_docs(dataset): diff --git a/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml b/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml index f91e06a5..0d29e639 100644 --- a/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml +++ b/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml @@ -3,7 +3,7 @@ dataset_path: tau/scrolls output_type: multiple_choice training_split: train validation_split: validation -process_docs: !function ../preprocessors.process_docs +process_docs: !function preprocessors.process_docs should_decontaminate: true doc_to_decontamination_query: input metric_list: diff --git 
a/lm_eval/tasks/scrolls/scroll_summary_task_yaml b/lm_eval/tasks/scrolls/scroll_summary_task_yaml index e12cdd24..a464ccab 100644 --- a/lm_eval/tasks/scrolls/scroll_summary_task_yaml +++ b/lm_eval/tasks/scrolls/scroll_summary_task_yaml @@ -3,13 +3,21 @@ dataset_path: tau/scrolls output_type: greedy_until training_split: train validation_split: validation -process_docs: !function ../preprocessors.process_docs +process_docs: !function preprocessors.process_docs doc_to_text: "{{input}}\n\nQuestion: What is a summary of the preceding text?\nAnswer:" -doc_to_target: "{{outputs.join(", ")}}" +doc_to_target: "{{outputs|join(', ')}}" should_decontaminate: true doc_to_decontamination_query: input metric_list: - - metric: f1 + - metric: !function metrics.rouge1 + aggregation: mean + higher_is_better: true + - metric: !function metrics.rouge2 + aggregation: mean + higher_is_better: true + - metric: !function metrics.rougeL + aggregation: mean + higher_is_better: true generation_kwargs: until: - "\n" diff --git a/lm_eval/tasks/scrolls/summscreenfd/default.yaml b/lm_eval/tasks/scrolls/summscreenfd/default.yaml index 542a3ba1..7ecabfae 100644 --- a/lm_eval/tasks/scrolls/summscreenfd/default.yaml +++ b/lm_eval/tasks/scrolls/summscreenfd/default.yaml @@ -1,4 +1,4 @@ include: ../scroll_summary_task_yaml -task: scrolls_govreport +task: scrolls_summscreenfd dataset_path: tau/scrolls dataset_name: summ_screen_fd diff --git a/lm_eval/utils.py b/lm_eval/utils.py index 86b19c70..ac9f7c75 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -432,8 +432,7 @@ def load_yaml_config(yaml_path): # If not found, assume the included yaml # is in the same dir as the original yaml if not os.path.isfile(path): - path = os.path.join(yaml_dir, path) - + path = os.path.normpath(os.path.join(yaml_dir, path)) try: included_yaml_config = load_yaml_config(path) final_yaml_config.update(included_yaml_config) -- GitLab From e6939479c17f5bc3901f653cbeaf8de66bbe96c0 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 11 Sep 2023 09:46:01 +0000 Subject: [PATCH 138/264] english cot, and added readable text in utils.py --- .../mgsm/{native_cot => en_cot}/cot_yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_bn_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_de_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_en_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_es_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_fr_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_ja_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_ru_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_sw_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_te_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_th_en-cot.yaml | 6 ++ lm_eval/tasks/mgsm/en_cot/mgsm_zh_en-cot.yaml | 6 ++ .../mgsm/native_cot/mgsm_cot_native_bn.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_de.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_en.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_es.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_fr.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_ja.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_ru.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_sw.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_te.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_th.yaml | 14 ---- .../mgsm/native_cot/mgsm_cot_native_zh.yaml | 14 ---- lm_eval/tasks/mgsm/utils.py | 65 +++++++++++++------ 24 files changed, 116 insertions(+), 175 deletions(-) rename lm_eval/tasks/mgsm/{native_cot => en_cot}/cot_yaml (79%) create mode 100644 
lm_eval/tasks/mgsm/en_cot/mgsm_bn_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_de_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_en_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_es_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_fr_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_ja_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_ru_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_sw_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_te_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_th_en-cot.yaml create mode 100644 lm_eval/tasks/mgsm/en_cot/mgsm_zh_en-cot.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml delete mode 100644 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml diff --git a/lm_eval/tasks/mgsm/native_cot/cot_yaml b/lm_eval/tasks/mgsm/en_cot/cot_yaml similarity index 79% rename from lm_eval/tasks/mgsm/native_cot/cot_yaml rename to lm_eval/tasks/mgsm/en_cot/cot_yaml index 08182e0d..94fa76be 100644 --- a/lm_eval/tasks/mgsm/native_cot/cot_yaml +++ b/lm_eval/tasks/mgsm/en_cot/cot_yaml @@ -21,3 +21,9 @@ metric_list: higher_is_better: true ignore_case: true ignore_punctuation: true +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" \ No newline at end of file diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_bn_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_bn_en-cot.yaml new file mode 100644 index 00000000..8cdcf4a4 --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_bn_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: bn +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"প্রশ্ন: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_bn_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_de_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_de_en-cot.yaml new file mode 100644 index 00000000..01bc74fc --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_de_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Frage: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_de_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_en_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_en_en-cot.yaml new file mode 100644 index 00000000..110336f1 --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_en_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer 
is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_en_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_es_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_es_en-cot.yaml new file mode 100644 index 00000000..ed634851 --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_es_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Pregunta: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_es_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_fr_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_fr_en-cot.yaml new file mode 100644 index 00000000..c9241eef --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_fr_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question : "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_fr_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_ja_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_ja_en-cot.yaml new file mode 100644 index 00000000..184ecfc5 --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_ja_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"問題: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_ja_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_ru_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_ru_en-cot.yaml new file mode 100644 index 00000000..244d1e1e --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_ru_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Задача: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_ru_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_sw_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_sw_en-cot.yaml new file mode 100644 index 00000000..4ec09e7f --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_sw_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Swali: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_sw_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_te_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_te_en-cot.yaml new file mode 100644 index 00000000..97d2d477 --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_te_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% 
if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"ప్రశ్న: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_te_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_th_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_th_en-cot.yaml new file mode 100644 index 00000000..2990ff8e --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_th_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"โจทย์: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_th_direct diff --git a/lm_eval/tasks/mgsm/en_cot/mgsm_zh_en-cot.yaml b/lm_eval/tasks/mgsm/en_cot/mgsm_zh_en-cot.yaml new file mode 100644 index 00000000..8a3b241d --- /dev/null +++ b/lm_eval/tasks/mgsm/en_cot/mgsm_zh_en-cot.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"问题: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +include: cot_yaml +task: mgsm_zh_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml deleted file mode 100644 index ce050085..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: bn -doc_to_target: '{% if answer is not none %}{{answer[16+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nধাপে ধাপে উত্তর:"}}{% else - %}{{"প্রশ্ন: "+question+"\nধাপে ধাপে উত্তর:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_bn_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml deleted file mode 100644 index ff8d789b..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: de -doc_to_target: '{% if answer is not none %}{{answer[28+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% - else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_de_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml deleted file mode 100644 index 9174cc90..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: en -doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else - %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_en_direct 
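The `get-answer` filter that `cot_yaml` now carries extracts the final numeric answer from a chain-of-thought generation with a regex and then keeps only the first match. A minimal standalone sketch of that extraction step (the pattern is copied from `cot_yaml`; the function name is illustrative, not part of the harness):

```python
import re
from typing import Optional

# Same pattern as the "get-answer" filter in cot_yaml.
ANSWER_RE = re.compile(r"The answer is (\-?[0-9\.\,]+)")

def extract_answer(generation: str) -> Optional[str]:
    # "regex" filter: collect all matches; "take_first": keep only the first.
    matches = ANSWER_RE.findall(generation)
    return matches[0] if matches else None

print(extract_answer("Roger has 5 + 6 = 11 balls. The answer is 11."))  # -> "11"
```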
diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml deleted file mode 100644 index ed223378..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: es -doc_to_target: '{% if answer is not none %}{{answer[22+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta paso a paso:"}}{% - else %}{{"Pregunta: "+question+"\nRespuesta paso a paso:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_es_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml deleted file mode 100644 index 4aa4a252..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: fr -doc_to_target: '{% if answer is not none %}{{answer[25+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nRéponse étape par étape :"}}{% - else %}{{"Question : "+question+"\nRéponse étape par étape :"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_fr_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml deleted file mode 100644 index 282c1f4c..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: ja -doc_to_target: '{% if answer is not none %}{{answer[10+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: - "+question+"\nステップごとの答え:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_ja_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml deleted file mode 100644 index b5fd4128..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: ru -doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nПошаговоерешение:"}}{% else - %}{{"Задача: "+question+"\nПошаговоерешение:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_ru_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml deleted file mode 100644 index 63655f65..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: sw -doc_to_target: '{% if answer is not none %}{{answer[24+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% - else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' -filter: -- function: 
regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_sw_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml deleted file mode 100644 index 08bb1dd3..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: te -doc_to_target: '{% if answer is not none %}{{answer[18+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else - %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_te_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml deleted file mode 100644 index d9b8c9da..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: th -doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else - %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_th_direct diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml deleted file mode 100644 index 65678633..00000000 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by utils.py -dataset_name: zh -doc_to_target: '{% if answer is not none %}{{answer[5+1]}}{% else %}{{answer_number|string}}{% - endif %}' -doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% - endif %}' -filter: -- function: regex - regex_pattern: The answer is (\-?[0-9\.\,]+) -- function: take_first -filter_list: -- name: get-answer -include: cot_yaml -task: mgsm_zh_direct diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py index 3bffd705..c7294a35 100644 --- a/lm_eval/tasks/mgsm/utils.py +++ b/lm_eval/tasks/mgsm/utils.py @@ -4,16 +4,19 @@ import argparse LANGUAGES = { "bn": { # Bengali + # "QUESTION": "প্রশ্ন:", "QUESTION": "\u09aa\u09cd\u09b0\u09b6\u09cd\u09a8:", + # "ANSWER": "ধাপে ধাপে উত্তর:", "ANSWER": "\u09a7\u09be\u09aa\u09c7 \u09a7\u09be\u09aa\u09c7 \u0989\u09a4\u09cd\u09a4\u09b0:", "DIRECT": "Answer:", "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", }, "de": { # German "QUESTION": "Frage:", + # "ANSWER": "Schritt-für-Schritt-Antwort:", "ANSWER": "Schritt-f\u00fcr-Schritt-Antwort:", "DIRECT": "Antwort:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + "REGEX": "Die Antwort lautet (\\-?[0-9\\.\\,]+)", }, "en": { # English "QUESTION": "Question:", @@ -24,50 +27,68 @@ LANGUAGES = { "es": { # Spanish "QUESTION": "Pregunta:", "ANSWER": "Respuesta paso a paso:", - "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + "DIRECT": "Respuesta:", + "REGEX": "La respuesta es (\\-?[0-9\\.\\,]+)", }, "fr": { # French "QUESTION": "Question :", + # "ANSWER": "Réponse étape par étape :" "ANSWER": "R\u00e9ponse \u00e9tape par \u00e9tape :", - 
"DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + # "DIRECT": "Réponse :", + "DIRECT": "R\u00e9ponse :" + # "REGEX": "La réponse est (\\-?[0-9\\.\\,]+)", + "REGEX": "La r\u00e9ponse est (\\-?[0-9\\.\\,]+)", }, "ru": { # Russian + # "QUESTION": "Задача:", "QUESTION": "\u0417\u0430\u0434\u0430\u0447\u0430:", + # "ANSWER": "Пошаговоерешение:", "ANSWER": "\u041f\u043e\u0448\u0430\u0433\u043e\u0432\u043e\u0435\u0440\u0435\u0448\u0435\u043d\u0438\u0435:", "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + # "REGEX": "Ответ — (\\-?[0-9\\.\\,]+)", + "REGEX": "\u041e\u0442\u0432\u0435\u0442 \u2014 (\\-?[0-9\\.\\,]+)", }, "sw": { # Swahili "QUESTION": "Swali:", "ANSWER": "Jibu la Hatua kwa Hatua:", "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + "REGEX": "Jibu ni (\\-?[0-9\\.\\,]+)", }, "te": { # Telugu + # "QUESTION": "ప్రశ్న:", "QUESTION": "\u0c2a\u0c4d\u0c30\u0c36\u0c4d\u0c28:", + # "ANSWER": "దశలవారీగా సమాధానం:", "ANSWER": "\u0c26\u0c36\u0c32\u0c35\u0c3e\u0c30\u0c40\u0c17\u0c3e \u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02:", "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + # "REGEX": "సమాధానం (\\-?[0-9\\.\\,]+)", + "REGEX": "\u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02 (\\-?[0-9\\.\\,]+)", }, "th": { # Thai + # "QUESTION": "โจทย์:", "QUESTION": "\u0e42\u0e08\u0e17\u0e22\u0e4c:", + # "ANSWER": "คำตอบทีละขั้นตอน:", "ANSWER": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e17\u0e35\u0e25\u0e30\u0e02\u0e31\u0e49\u0e19\u0e15\u0e2d\u0e19:", "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + # "REGEX": "คำตอบคือ (\\-?[0-9\\.\\,]+)", + "REGEX": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e04\u0e37\u0e2d (\\-?[0-9\\.\\,]+)", }, "ja": { # Japanese + # "QUESTION": "問題:", "QUESTION": "\u554f\u984c:", + # "ANSWER": "ステップごとの答え:", "ANSWER": "\u30b9\u30c6\u30c3\u30d7\u3054\u3068\u306e\u7b54\u3048:", "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + # "REGEX": "答えは(\\-?[0-9\\.\\,]+)です。", + "REGEX": "\u7b54\u3048\u306f(\\-?[0-9\\.\\,]+)\u3067\u3059\u3002", }, "zh": { # Chinese + # "QUESTION": "问题:", "QUESTION": "\u95ee\u9898:", + # "ANSWER": "逐步解答:", "ANSWER": "\u9010\u6b65\u89e3\u7b54:", "DIRECT": "Answer:", - "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + # "REGEX": "答案是 (\\-?[0-9\\.\\,]+)。", + "REGEX": "\u7b54\u6848\u662f (\\-?[0-9\\.\\,]+)\u3002", }, } @@ -80,15 +101,15 @@ def add_regex_pattern(regex_pattern): "filter_list": [ { "name": "get-answer", - }, - ], - "filter": [ - { - "function": "regex", - "regex_pattern": regex_pattern, - }, - { - "function": "take_first", + "filter": [ + { + "function": "regex", + "regex_pattern": regex_pattern, + }, + { + "function": "take_first", + }, + ], }, ], } @@ -107,6 +128,7 @@ def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: QUESTION = LANGUAGES[lang]["QUESTION"] yaml_template = "cot_yaml" + filter_list = {} if mode == "direct": ANSWER = LANGUAGES[lang]["DIRECT"] REGEX = None @@ -116,13 +138,13 @@ def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: ANSWER = LANGUAGES[lang]["ANSWER"] REGEX = LANGUAGES[lang]["REGEX"] task_name = f"mgsm_{lang}_native-cot" + filter_list = add_regex_pattern(REGEX) elif mode == "en-cot": ANSWER = LANGUAGES["en"]["ANSWER"] REGEX = LANGUAGES["en"]["REGEX"] task_name = f"mgsm_{lang}_en-cot" file_name = f"{task_name}.yaml" - filter_list = add_regex_pattern(REGEX) with open( f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" @@ -147,6 +169,7 @@ def gen_lang_yamls(output_dir: str, 
overwrite: bool, mode: str) -> None:
                 },
                 f,
                 allow_unicode=True,
+                width=float("inf"),
             )
         except FileExistsError:
             err.append(file_name)
-- 
GitLab


From 095d12f744a3329055f5a82369ea79f8f298025e Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Mon, 11 Sep 2023 18:41:30 +0000
Subject: [PATCH 139/264] fill out interface.md

---
 docs/interface.md  | 95 ++++++++++++++++++++++++++++++++++++++++++++++
 docs/user_guide.md |  9 -----
 2 files changed, 95 insertions(+), 9 deletions(-)
 create mode 100644 docs/interface.md
 delete mode 100644 docs/user_guide.md

diff --git a/docs/interface.md b/docs/interface.md
new file mode 100644
index 00000000..029c635d
--- /dev/null
+++ b/docs/interface.md
@@ -0,0 +1,95 @@
+# User Guide
+
+This document details the interface exposed by `lm-eval` and provides details on what flags are available to users.
+
+## Command-line Interface
+
+A majority of users run the library by cloning it from GitHub and running the `main.py` script.
+
+Equivalently, running the library can be done via the `lm-eval` entrypoint at the command line.
+
+This mode supports a number of command-line arguments, the details of which can also be seen via running with `-h` or `--help`:
+
+* `--model` : Selects which model type or provider is evaluated. Must be a string corresponding to the name of the model type/provider being used. See [the main README](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor#commercial-apis) for a full list of enabled model names and supported libraries or APIs.
+
+* `--model_args` : Controls parameters passed to the model constructor. Accepts a string containing comma-separated keyword arguments to the model class of the format `"arg1=val1,arg2=val2,..."`, such as, for example, `--model_args pretrained=EleutherAI/pythia-160m,dtype=float32`. For a full list of supported keyword arguments, see the initialization of the `lm_eval.api.model.LM` subclass, e.g. [`HFLM`](https://github.com/EleutherAI/lm-evaluation-harness/blob/365fcda9b85bbb6e0572d91976b8daf409164500/lm_eval/models/huggingface.py#L66)
+
+* `--tasks` : Determines which tasks or task groups are evaluated. Accepts a comma-separated list of task names or task group names. Must be solely comprised of valid tasks/groups.
+
+* `--num_fewshot` : Sets the number of few-shot examples to place in context. Must be an integer.
+
+* `--batch_size` : Sets the batch size used for evaluation. Can be a positive integer or `"auto"` to automatically select the largest batch size that will fit in memory, speeding up evaluation. One can pass `--batch_size auto:N` to re-select the maximum batch size `N` times during evaluation. This can help accelerate evaluation further, since `lm-eval` sorts documents in descending order of context length.
+
+* `--max_batch_size` : Sets the maximum batch size to try to fit in memory, if `--batch_size auto` is passed.
+
+* `--device` : Sets which device to place the model onto. Must be a string, for example, `"cuda", "cuda:0", "cpu", "mps"`. Defaults to "cuda", and can be ignored if running multi-GPU or running a non-local model type.
+
+* `--output_path` : A string of the form `dir/file.jsonl` or `dir/`. Provides a path where high-level results will be saved, either into the file named or into the directory named. If `--log_samples` is passed as well, then per-document outputs and metrics will be saved into the directory as well.
+
+* `--log_samples` : If this flag is passed, then the model's outputs, and the text fed into the model, will be saved at per-document granularity.
Must be used with `--output_path`.
+
+* `--limit` : Accepts an integer, or a float between 0.0 and 1.0. If passed, will limit the number of documents to evaluate to the first X documents (if an integer) per task or first X% of documents per task. Useful for debugging, especially on costly API models.
+
+* `--use_cache` : Should be a path where a sqlite db file can be written to. Takes a string of format `/path/to/sqlite_cache_` in order to create a cache db at `/path/to/sqlite_cache_rank{i}.db` for each process (0-NUM_GPUS). This allows results of prior runs to be cached, so that there is no need to re-run results in order to re-score or re-run a given (model, task) pair again.
+
+* `--decontamination_ngrams_path` : Deprecated, see [this commit](https://github.com/EleutherAI/lm-evaluation-harness/commit/00209e10f6e27edf5d766145afaf894079b5fe10) or older for a working decontamination-checker tool.
+
+* `--check_integrity` : If this flag is used, the library tests for each task selected are run to confirm task integrity.
+
+* `--write_out` : Used for diagnostic purposes to observe the format of task documents passed to a model. If this flag is used, then prints the prompt and gold target string for the first document of each task.
+
+* `--show_config` : If used, prints the full `lm_eval.api.task.TaskConfig` contents (non-default settings in the task YAML file) for each task which was run, at the completion of an evaluation. Useful for when one is modifying a task's configuration YAML locally to transmit the exact configurations used for debugging or for reproducibility purposes.
+
+* `--include_path` : Accepts a path to a folder. If passed, then all YAML files containing `lm-eval`-compatible task configurations will be added to the task registry as available tasks. Used for when one is writing config files for their own task in a folder other than `lm_eval/tasks/`.
+
+## External Library Usage
+
+We also support using the library's external API for use within model training loops or other scripts.
+
+`lm_eval` supplies two functions for external import and use: `lm_eval.evaluate()` and `lm_eval.simple_evaluate()`.
+
+
+`simple_evaluate()` can be used by simply creating an `lm_eval.api.model.LM` subclass that implements the methods described in the [Model Guide](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor/docs/model_guide.md), and wrapping your custom model in that class as follows:
+
+```python
+import lm_eval
+...
+
+my_model = initialize_my_model() # create your model (could be running finetuning with some custom modeling code)
+...
+lm_obj = Your_LM(model=my_model, batch_size=16) # instantiate an LM subclass that takes your initialized model and can run `Your_LM.loglikelihood()`, `Your_LM.loglikelihood_rolling()`, `Your_LM.greedy_until()`
+
+results = lm_eval.simple_evaluate( # call simple_evaluate
+    model=lm_obj,
+    tasks=["taskname1", "taskname2"],
+    num_fewshot=0,
+    ...
+)
+```
+
+
+See https://github.com/EleutherAI/lm-evaluation-harness/blob/365fcda9b85bbb6e0572d91976b8daf409164500/lm_eval/evaluator.py#L35 for a full description of all arguments available. All keyword arguments to `simple_evaluate()` share the same role as the command-line flags described previously.
+
+Additionally, the `evaluate()` function offers the core evaluation functionality provided by the library, but without some of the special
+handling, simplification, and abstraction provided by `simple_evaluate()`.
+
+See https://github.com/EleutherAI/lm-evaluation-harness/blob/365fcda9b85bbb6e0572d91976b8daf409164500/lm_eval/evaluator.py#L173 for more details.
+
+As a brief example usage of `evaluate()`:
+```python
+import lm_eval
+
+from my_tasks import MyTask1 # suppose you've defined a custom lm_eval.api.Task subclass in your own external codebase
+...
+
+my_model = initialize_my_model() # create your model (could be running finetuning with some custom modeling code)
+...
+lm_obj = Your_LM(model=my_model, batch_size=16) # instantiate an LM subclass that takes your initialized model and can run `Your_LM.loglikelihood()`, `Your_LM.loglikelihood_rolling()`, `Your_LM.greedy_until()`
+
+
+
+results = lm_eval.evaluate(
+    lm=lm_obj,
+    task_dict={"mytask1": MyTask1},
+    ...
+)
+```
diff --git a/docs/user_guide.md b/docs/user_guide.md
deleted file mode 100644
index 86cc5df0..00000000
--- a/docs/user_guide.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# User Guide
-
-
-## Command-line Interface
-
-
-## External Library Usage
-
-* We support 2 ways of calling the library, for use within training loops or other scripts.
-- 
GitLab


From 0b99c7d2d65664567afb9d3c7f605144513c7f9e Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Mon, 11 Sep 2023 18:41:52 +0000
Subject: [PATCH 140/264] update evaluator docstring

---
 lm_eval/evaluator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py
index 7aef2acd..4b03f1ec 100644
--- a/lm_eval/evaluator.py
+++ b/lm_eval/evaluator.py
@@ -185,7 +185,7 @@ def evaluate(
     :param lm: obj
         Language Model
     :param task_dict: dict[str, Task]
-        Dictionary of tasks. Tasks will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
+        Dictionary of tasks. Tasks will be taken to have name type(task).config.task.
     :param limit: int, optional
         Limit the number of examples per task (only use this for testing)
     :param bootstrap_iters:
-- 
GitLab


From fb436108c1f35c68bdbec8a37e78a44365a91c86 Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Mon, 11 Sep 2023 18:55:21 +0000
Subject: [PATCH 141/264] make Task._config a public property

---
 lm_eval/api/task.py | 139 +++++++++++++++++++++++---------------------
 1 file changed, 72 insertions(+), 67 deletions(-)

diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py
index 50c53764..fffaf286 100644
--- a/lm_eval/api/task.py
+++ b/lm_eval/api/task.py
@@ -246,6 +246,11 @@ class Task(abc.ABC):
             download_mode=download_mode,
         )
 
+    @property
+    def config(self):
+        """Returns the TaskConfig associated with this class."""
+        return self._config
+
     @abc.abstractmethod
     def has_training_docs(self):
         """Whether the task has a training set"""
@@ -348,7 +353,7 @@ class Task(abc.ABC):
         ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
 
         eval_logger.info(
-            f"Building contexts for task '{self._config.task}' on rank {rank}..."
+            f"Building contexts for task '{self.config.task}' on rank {rank}..."
         )
 
         instances = []
@@ -358,14 +363,14 @@ class Task(abc.ABC):
             # sample fewshot context #TODO: need to offset doc_id by rank now!
fewshot_ctx = self.fewshot_context( doc, - self._config.num_fewshot, + self.config.num_fewshot, ) - # TODO: we should override self._config.repeats if doing greedy gen so users don't waste time+compute + # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute inst = self.construct_requests( doc=doc, ctx=fewshot_ctx, - metadata=(self._config["task"], doc_id, self._config.repeats), + metadata=(self.config["task"], doc_id, self.config.repeats), ) if not isinstance(inst, list): @@ -453,9 +458,9 @@ class Task(abc.ABC): if num_fewshot == 0: # always prepend the (possibly empty) task description - labeled_examples = self._config.description + labeled_examples = self.config.description else: - labeled_examples = self._config.description + self.sampler.get_context( + labeled_examples = self.config.description + self.sampler.get_context( doc, num_fewshot ) @@ -465,7 +470,7 @@ class Task(abc.ABC): elif type(example) == list: return [labeled_examples + ex for ex in example] elif type(example) == int: - if self._config.doc_to_choice is not None: + if self.config.doc_to_choice is not None: choices = self.doc_to_choice(doc) return labeled_examples + choices[example] else: @@ -488,7 +493,7 @@ class Task(abc.ABC): """ # TODO: this should only return the overrides applied to a non-YAML task's configuration. # (num_fewshot) - return self._config.to_dict() + return self.config.to_dict() class ConfigurableTask(Task): @@ -503,35 +508,35 @@ class ConfigurableTask(Task): self._config = self.CONFIG # Use new configurations if there was no preconfiguration - if self._config is None: + if self.config is None: self._config = TaskConfig(**config) # Overwrite configs else: if config is not None: self._config.__dict__.update(config) - if self._config is None: + if self.config is None: raise ValueError( "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg" ) - if self._config.output_type is not None: - assert self._config.output_type in ALL_OUTPUT_TYPES - self.OUTPUT_TYPE = self._config.output_type + if self.config.output_type is not None: + assert self.config.output_type in ALL_OUTPUT_TYPES + self.OUTPUT_TYPE = self.config.output_type - if self._config.dataset_path is not None: - self.DATASET_PATH = self._config.dataset_path + if self.config.dataset_path is not None: + self.DATASET_PATH = self.config.dataset_path - if self._config.dataset_name is not None: - self.DATASET_NAME = self._config.dataset_name + if self.config.dataset_name is not None: + self.DATASET_NAME = self.config.dataset_name self._metric_fn_list = {} self._metric_fn_kwargs = {} self._aggregation_list = {} self._higher_is_better = {} - _metric_list = DEFAULT_METRIC_REGISTRY[self._config.output_type] - if self._config.metric_list is None: + _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type] + if self.config.metric_list is None: # TODO: handle this in TaskConfig.__post_init__ ? 
for metric_name in _metric_list: self._metric_fn_list[metric_name] = get_metric(metric_name) @@ -540,7 +545,7 @@ class ConfigurableTask(Task): ) self._higher_is_better[metric_name] = is_higher_better(metric_name) else: - for metric_config in self._config.metric_list: + for metric_config in self.config.metric_list: assert "metric" in metric_config metric_name = metric_config["metric"] kwargs = { @@ -549,7 +554,7 @@ class ConfigurableTask(Task): if key not in ["metric", "aggregation", "higher_is_better"] } - if self._config.process_results is not None: + if self.config.process_results is not None: self._metric_fn_list[metric_name] = None self._metric_fn_kwargs[metric_name] = {} elif callable(metric_name): @@ -592,13 +597,13 @@ class ConfigurableTask(Task): ) self._higher_is_better[metric_name] = is_higher_better(metric_name) - self.download(self._config.dataset_kwargs) + self.download(self.config.dataset_kwargs) self._training_docs = None self._fewshot_docs = None - if self._config.filter_list is not None: + if self.config.filter_list is not None: self._filters = [] - for filter_config in self._config.filter_list: + for filter_config in self.config.filter_list: for filter_pipeline in filter_config: filter_name = filter_config["name"] filter_functions = filter_config["filter"] @@ -613,10 +618,10 @@ class ConfigurableTask(Task): else: self._filters = [build_filter_ensemble("none", [["take_first", None]])] - if self._config.use_prompt is not None: - eval_logger.info(f"loading prompt {self._config.use_prompt}") + if self.config.use_prompt is not None: + eval_logger.info(f"loading prompt {self.config.use_prompt}") self.prompt = get_prompt( - self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME + self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME ) else: self.prompt = None @@ -643,7 +648,7 @@ class ConfigurableTask(Task): test_text = self.doc_to_text(test_doc) test_target = self.doc_to_target(test_doc) - if self._config.doc_to_choice is not None: + if self.config.doc_to_choice is not None: test_choice = self.doc_to_choice(test_doc) if type(test_choice) is not list: eval_logger.error("doc_to_choice must return list") @@ -671,7 +676,7 @@ class ConfigurableTask(Task): for choice in check_choices: choice_has_whitespace = True if " " in choice else False delimiter_has_whitespace = ( - True if " " in self._config.target_delimiter else False + True if " " in self.config.target_delimiter else False ) if delimiter_has_whitespace and choice_has_whitespace: @@ -692,67 +697,67 @@ class ConfigurableTask(Task): ) def has_training_docs(self) -> bool: - if self._config.training_split is not None: + if self.config.training_split is not None: return True else: return False def has_validation_docs(self) -> bool: - if self._config.validation_split is not None: + if self.config.validation_split is not None: return True else: return False def has_test_docs(self) -> bool: - if self._config.test_split is not None: + if self.config.test_split is not None: return True else: return False def training_docs(self) -> datasets.Dataset: if self.has_training_docs(): - if self._config.process_docs is not None: - return self._config.process_docs( - self.dataset[self._config.training_split] + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.training_split] ) - return self.dataset[self._config.training_split] + return self.dataset[self.config.training_split] def validation_docs(self) -> datasets.Dataset: if self.has_validation_docs(): - if 
self._config.process_docs is not None: - return self._config.process_docs( - self.dataset[self._config.validation_split] + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.validation_split] ) - return self.dataset[self._config.validation_split] + return self.dataset[self.config.validation_split] def test_docs(self) -> datasets.Dataset: if self.has_test_docs(): - if self._config.process_docs is not None: - return self._config.process_docs(self.dataset[self._config.test_split]) - return self.dataset[self._config.test_split] + if self.config.process_docs is not None: + return self.config.process_docs(self.dataset[self.config.test_split]) + return self.dataset[self.config.test_split] def fewshot_docs(self): - if self._config.fewshot_split is not None: - return self.dataset[self._config.fewshot_split] + if self.config.fewshot_split is not None: + return self.dataset[self.config.fewshot_split] else: - if self._config.num_fewshot > 0: + if self.config.num_fewshot > 0: eval_logger.warning( - f"Task '{self._config.task}': " + f"Task '{self.config.task}': " "num_fewshot > 0 but fewshot_split is None. " "using preconfigured rule." ) return super().fewshot_docs() def should_decontaminate(self): - return self._config.should_decontaminate + return self.config.should_decontaminate def doc_to_decontamination_query(self, doc): - if self._config.should_decontaminate: - if self._config.doc_to_decontamination_query in self.features: - return doc[self._config.doc_to_decontamination_query] + if self.config.should_decontaminate: + if self.config.doc_to_decontamination_query in self.features: + return doc[self.config.doc_to_decontamination_query] else: return ast.literal_eval( - utils.apply_template(self._config.doc_to_decontamination_query, doc) + utils.apply_template(self.config.doc_to_decontamination_query, doc) ) def _process_doc(self, doc): @@ -771,13 +776,13 @@ class ConfigurableTask(Task): if self.prompt is not None: doc_to_text = self.prompt else: - doc_to_text = self._config.doc_to_text + doc_to_text = self.config.doc_to_text if type(doc_to_text) == int: return doc_to_text elif type(doc_to_text) == str: if doc_to_text in self.features: - # if self._config.doc_to_choice is not None: + # if self.config.doc_to_choice is not None: # return self.doc_to_choice(doc)[doc[doc_to_text]] # else: return doc[doc_to_text] @@ -796,7 +801,7 @@ class ConfigurableTask(Task): return applied_prompt[0] else: eval_logger.warning("Applied prompt returns empty string") - return self._config.fewshot_delimiter + return self.config.fewshot_delimiter else: print(type(doc_to_text)) raise TypeError @@ -806,13 +811,13 @@ class ConfigurableTask(Task): if self.prompt is not None: doc_to_target = self.prompt else: - doc_to_target = self._config.doc_to_target + doc_to_target = self.config.doc_to_target if type(doc_to_target) == int: return doc_to_target elif type(doc_to_target) == str: if doc_to_target in self.features: - # if self._config.doc_to_choice is not None: + # if self.config.doc_to_choice is not None: # return self.doc_to_choice(doc)[doc[doc_to_target]] # else: return doc[doc_to_target] @@ -839,7 +844,7 @@ class ConfigurableTask(Task): return applied_prompt[1] else: eval_logger.warning("Applied prompt returns empty string") - return self._config.fewshot_delimiter + return self.config.fewshot_delimiter else: raise TypeError @@ -847,10 +852,10 @@ class ConfigurableTask(Task): if self.prompt is not None: doc_to_choice = self.prompt - elif self._config.doc_to_choice is None: + 
elif self.config.doc_to_choice is None: eval_logger.error("doc_to_choice was called but not set in config") else: - doc_to_choice = self._config.doc_to_choice + doc_to_choice = self.config.doc_to_choice if type(doc_to_choice) == str: return ast.literal_eval(utils.apply_template(doc_to_choice, doc)) @@ -871,8 +876,8 @@ class ConfigurableTask(Task): # in multiple_choice tasks, this should be castable to an int corresponding to the index # within the answer choices, while doc_to_target is the string version of {{answer_choices[gold]}}. - if self._config.gold_alias is not None: - doc_to_target = self._config.gold_alias + if self.config.gold_alias is not None: + doc_to_target = self.config.gold_alias else: return self.doc_to_target(doc) @@ -896,7 +901,7 @@ class ConfigurableTask(Task): elif self.OUTPUT_TYPE == "multiple_choice": choices = self.doc_to_choice(doc) - target_delimiter = self._config.target_delimiter + target_delimiter = self.config.target_delimiter if self.multiple_input: # If there are multiple inputs, choices are placed in the ctx cont = self.doc_to_target(doc) @@ -938,7 +943,7 @@ class ConfigurableTask(Task): return request_list elif self.OUTPUT_TYPE == "greedy_until": - arguments = (ctx, self._config.generation_kwargs) + arguments = (ctx, self.config.generation_kwargs) return Instance( request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs @@ -946,8 +951,8 @@ class ConfigurableTask(Task): def process_results(self, doc, results): - if callable(self._config.process_results): - return self._config.process_results(doc, results) + if callable(self.config.process_results): + return self.config.process_results(doc, results) result_dict = {} use_metric = list(self._metric_fn_list.keys()) @@ -1036,7 +1041,7 @@ class ConfigurableTask(Task): elif self.OUTPUT_TYPE == "greedy_until": gold = self.doc_to_target(doc) - if self._config.doc_to_choice is not None: + if self.config.doc_to_choice is not None: # If you set doc_to_choice, # it assumes that doc_to_target returns a number. 
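The `construct_requests` hunk above is where one multiple-choice document fans out into one loglikelihood request per answer choice, each continuation prefixed by `target_delimiter`. Roughly, with a plain tuple standing in for the harness's `Instance` and the `multiple_input` branch omitted:

```python
def construct_mc_requests(ctx, choices, target_delimiter=" "):
    """One (context, continuation) pair per answer choice.

    The harness wraps each pair in an Instance carrying idx=i; scoring
    later takes the argmax over the per-choice log-likelihoods.
    """
    return [(ctx, f"{target_delimiter}{choice}") for choice in choices]


requests = construct_mc_requests(
    ctx="Question: Is the sky blue?\nAnswer:",
    choices=["yes", "no"],
)
for i, (context, continuation) in enumerate(requests):
    print(i, repr(continuation))  # 0 ' yes' / 1 ' no'
```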
choices = self.doc_to_choice(doc) -- GitLab From 5ebe28ebbc88fc443a928d10a08d082cf68ae0c3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 02:57:39 +0000 Subject: [PATCH 142/264] fix typo --- lm_eval/tasks/mgsm/en_cot/cot_yaml | 2 +- lm_eval/tasks/mgsm/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/mgsm/en_cot/cot_yaml b/lm_eval/tasks/mgsm/en_cot/cot_yaml index 94fa76be..f5cf60d9 100644 --- a/lm_eval/tasks/mgsm/en_cot/cot_yaml +++ b/lm_eval/tasks/mgsm/en_cot/cot_yaml @@ -26,4 +26,4 @@ filter_list: filter: - function: "regex" regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" - - function: "take_first" \ No newline at end of file + - function: "take_first" diff --git a/lm_eval/tasks/mgsm/utils.py b/lm_eval/tasks/mgsm/utils.py index c7294a35..97affac7 100644 --- a/lm_eval/tasks/mgsm/utils.py +++ b/lm_eval/tasks/mgsm/utils.py @@ -35,7 +35,7 @@ LANGUAGES = { # "ANSWER": "Réponse étape par étape :" "ANSWER": "R\u00e9ponse \u00e9tape par \u00e9tape :", # "DIRECT": "Réponse :", - "DIRECT": "R\u00e9ponse :" + "DIRECT": "R\u00e9ponse :", # "REGEX": "La réponse est (\\-?[0-9\\.\\,]+)", "REGEX": "La r\u00e9ponse est (\\-?[0-9\\.\\,]+)", }, -- GitLab From f7d9850072ae032d0eb49bc576ef4584a31d01a8 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 10:00:10 +0000 Subject: [PATCH 143/264] allow nested groups --- lm_eval/tasks/__init__.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 6828b233..2f89defe 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -136,6 +136,9 @@ def get_task_dict(task_name_list: List[Union[str, dict, Task]], **kwargs): task_name_from_config_dict = {} task_name_from_object_dict = {} + if type(task_name_list) != list: + task_name_list = [task_name_list] + for task_element in task_name_list: if isinstance(task_element, str): @@ -143,12 +146,20 @@ def get_task_dict(task_name_list: List[Union[str, dict, Task]], **kwargs): group_name = task_element for task_name in GROUP_REGISTRY[task_element]: if task_name not in task_name_from_registry_dict: + task_obj = get_task_dict(task_name) + if task_name in task_obj.keys(): + task_dict = { + task_name: (group_name, task_obj[task_name]), + } + else: + task_dict = { + task_name: (group_name, None), + **task_obj, + } + task_name_from_registry_dict = { **task_name_from_registry_dict, - task_name: ( - group_name, - get_task(task_name=task_name, config=config), - ), + **task_dict, } else: task_name = task_element -- GitLab From 4e4f0de222e9dfc05d52bf92557e3d6ba43c65d9 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 10:00:50 +0000 Subject: [PATCH 144/264] changed term to groups --- lm_eval/utils.py | 8 +++++--- main.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index c63e8daf..5a734a0c 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -267,9 +267,11 @@ def make_table(result_dict, column: str = "results"): from pytablewriter import MarkdownTableWriter, LatexTableWriter if column == "results": - column_name = "Task" - elif column == "aggregate": - column_name = "Benchmark" + column_name = "Tasks" + elif column == "groups": + column_name = "Groups" + elif column == "benchmarks": + column_name = "Benchmarks" md_writer = MarkdownTableWriter() latex_writer = LatexTableWriter() diff --git a/main.py b/main.py index 083e34be..83e249d6 100644 --- a/main.py +++ b/main.py 
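PATCH 143 above makes `get_task_dict` recursive, so a group registered in `GROUP_REGISTRY` may contain other groups as well as concrete tasks, and each resolved task is tagged with the group it was reached through. The shape of that recursion, modeled on a toy registry (the names below are invented for illustration):

```python
# Toy registries; the real TASK_REGISTRY maps names to Task classes.
TASK_REGISTRY = {"boolq": "BoolQTask", "rte": "RTETask", "cola": "ColaTask"}
GROUP_REGISTRY = {
    "super-glue": ["boolq", "rte"],
    "my-suite": ["super-glue", "cola"],  # a group nesting another group
}


def get_task_dict(name, parent=None):
    """Resolve a task or (possibly nested) group to {task_name: (group, task)}."""
    if name in GROUP_REGISTRY:
        resolved = {}
        for member in GROUP_REGISTRY[name]:
            resolved.update(get_task_dict(member, parent=name))
        return resolved
    return {name: (parent, TASK_REGISTRY[name])}


print(get_task_dict("my-suite"))
# {'boolq': ('super-glue', 'BoolQTask'),
#  'rte': ('super-glue', 'RTETask'),
#  'cola': ('my-suite', 'ColaTask')}
```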
@@ -209,8 +209,8 @@ def main() -> None: f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}" ) print(evaluator.make_table(results)) - if "aggregate" in results: - print(evaluator.make_table(results, "aggregate")) + if "groups" in results: + print(evaluator.make_table(results, "groups")) if __name__ == "__main__": -- GitLab From 6bba33dbf20a3e2cc0444c5bfffa2c1a93d29c27 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 10:01:00 +0000 Subject: [PATCH 145/264] changed term to groups --- lm_eval/utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index 5a734a0c..fe236a74 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -270,8 +270,6 @@ def make_table(result_dict, column: str = "results"): column_name = "Tasks" elif column == "groups": column_name = "Groups" - elif column == "benchmarks": - column_name = "Benchmarks" md_writer = MarkdownTableWriter() latex_writer = LatexTableWriter() -- GitLab From aed4e6943dd45c460bbb984a1f9ec9c722d66934 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 15:16:57 +0000 Subject: [PATCH 146/264] presented with task-group relation --- lm_eval/evaluator.py | 115 +++++++++++++++++++++++++++++++++---------- 1 file changed, 88 insertions(+), 27 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 54f9655a..6bc33d46 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -115,11 +115,30 @@ def simple_evaluate( + "_rank" + str(lm.rank) + ".db", ) + # def _change_fewshot(task_dict): + # for task_name in task_dict.keys(): + # task_obj = task_dict[task_name] + # if type(task_obj) == tuple: + # group, task_obj = task_obj + # if task_obj + + # config = task_obj._config + # if num_fewshot is not None: + # if config["num_fewshot"] > 0: + # default_num_fewshot = config["num_fewshot"] + # eval_logger.warning( + # f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}" + # ) + + # task_obj._config["num_fewshot"] = num_fewshot + task_dict = lm_eval.tasks.get_task_dict(tasks) for task_name in task_dict.keys(): task_obj = task_dict[task_name] if type(task_obj) == tuple: group, task_obj = task_obj + if task_obj is None: + continue config = task_obj._config if num_fewshot is not None: @@ -210,22 +229,29 @@ def evaluate( # tracks all Instances/requests a model must generate output on. requests = collections.defaultdict(list) # Stores task scores based on task grouping. - aggregate = collections.defaultdict(dict) + results_agg = collections.defaultdict(dict) # tracks if a task was chosen via user selecting a group containing it - task_groups = collections.defaultdict(dict) + task_to_group = collections.defaultdict(dict) + group_to_task = collections.defaultdict(list) # stores the amount to pad out reqs per req. 
type so that # number of fwd passes per distributed rank is equal padding_requests = collections.defaultdict(int) - # Stores group related keys and values for group-aggregation - task_groups = collections.defaultdict(dict) - # get lists of each type of request for task_name, task in task_dict.items(): if type(task) == tuple: group, task = task - task_groups[task_name] = group - aggregate[task_name] = {} + task_to_group[task_name] = group + + if group in list(group_to_task.keys()): + group_to_task[group].append(task_name) + else: + group_to_task[group] = [task_name] + + if task is None: + continue + else: + group_to_task[task_name] = [] versions[task_name] = task.VERSION configs[task_name] = dict(task.dump_config()) @@ -301,6 +327,8 @@ def evaluate( for task_name, task in task_dict.items(): if type(task) == tuple: group, task = task + if task is None: + continue task.apply_filters() ### Collect values of metrics on all datapoints ### @@ -310,6 +338,8 @@ def evaluate( for task_name, task in task_dict.items(): if type(task) == tuple: group, task = task + if task is None: + continue # TODO: make it possible to use a different metric per filter # iterate over different filters used for key in task.instances[0].filtered_resps.keys(): @@ -405,19 +435,6 @@ def evaluate( task_score = task.aggregation()[metric](items) results[task_name][metric + "," + key] = task_score - # Need to put back in results - # pythia | acc - # | perplexity - # | word_perplexity - # | byte_perplexity - # | bits_per_byte - if task_name in task_groups: - group_name = task_groups[task_name] - if metric in list(aggregate[group_name].keys()): - aggregate[group_name][metric].append(task_score) - else: - aggregate[group_name][metric] = [task_score] - # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap # so we run them less iterations. 
still looking for a cleaner way to do this if False: # bootstrap_iters > 0: @@ -431,17 +448,61 @@ def evaluate( if stderr is not None: results[task_name][metric + "_stderr" + "," + key] = stderr(items) - if bool(aggregate): - for group in aggregate.keys(): - for metric in aggregate[group].keys(): - aggregate[group][metric] = np.average(aggregate[group][metric]) + tab_dict = {} + for group in group_to_task: + task_list = group_to_task[group] + if group not in tab_dict: + tab_dict[group] = 0 + + for task in task_list: + if task in tab_dict: + tab_dict[task] += 1 + else: + tab_dict[task] = 1 + tab_dict[group] + print(tab_dict) + zero_order_groups = [group for group in tab_dict if tab_dict[group] == 0] + + for task_name, task in task_dict.items(): + if type(task) == tuple: + group_name, _ = task + else: + group_name = None + + scores = results[task_name] + if group_name is not None: + group_name = tab_dict[group_name] * "-" + group_name + if group_name not in results_agg: + results_agg[group_name] = {} + + for metric in scores: + if metric in results_agg[group_name]: + results_agg[group_name][metric].append(scores[metric]) + else: + results_agg[group_name][metric] = [scores[metric]] + + tab_task_name = tab_dict[task_name] * "-" + task_name + results_agg[tab_task_name] = scores + versions[tab_task_name] = versions[task_name] + + if bool(results_agg): + for group in results_agg.keys(): + for metric in results_agg[group].keys(): + results_agg[group][metric] = np.average(results_agg[group][metric]) versions[group] = "N/A" results_dict = { - "results": dict(sorted(results.items())), + "results": dict(results_agg.items()), **( - {"aggregate": dict(sorted(aggregate.items()))} - if bool(aggregate) + { + "groups": dict( + [ + item + for item in results_agg.items() + if item[0] in zero_order_groups + ] + ) + } + if len(zero_order_groups) > 0 else {} ), "configs": dict(sorted(configs.items())), -- GitLab From 17ec49064a2eeba77c438b345fbc7dadabc436fc Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 15:25:37 +0000 Subject: [PATCH 147/264] update --- lm_eval/benchmarks/__init__.py | 2 +- lm_eval/benchmarks/pythia.yaml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lm_eval/benchmarks/__init__.py b/lm_eval/benchmarks/__init__.py index 77dcdef0..4b224d2b 100644 --- a/lm_eval/benchmarks/__init__.py +++ b/lm_eval/benchmarks/__init__.py @@ -44,7 +44,7 @@ def include_benchmarks(task_dir: str) -> None: task_names = utils.pattern_match(task_list, ALL_TASKS) for task in task_names: - if task in TASK_REGISTRY: + if (task in TASK_REGISTRY) or (task in GROUP_REGISTRY): if group in GROUP_REGISTRY: GROUP_REGISTRY[group].append(task) else: diff --git a/lm_eval/benchmarks/pythia.yaml b/lm_eval/benchmarks/pythia.yaml index 23419ea6..bb5b1174 100644 --- a/lm_eval/benchmarks/pythia.yaml +++ b/lm_eval/benchmarks/pythia.yaml @@ -1,12 +1,12 @@ group: pythia task: - lambada_openai - - wikitext + - logiqa - piqa - sciq - - wsc + - wikitext - winogrande - - arc - - logiqa + - wsc + - ai2_arc - blimp - hendrycksTest* -- GitLab From d29c0940c0ca6bbe0df5562a55ee302c952bf8db Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 15:27:41 +0000 Subject: [PATCH 148/264] update --- lm_eval/evaluator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 6bc33d46..f52a5bd5 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -459,7 +459,6 @@ def evaluate( tab_dict[task] += 1 else: tab_dict[task] = 1 + tab_dict[group] - 
print(tab_dict) zero_order_groups = [group for group in tab_dict if tab_dict[group] == 0] for task_name, task in task_dict.items(): -- GitLab From b11f7f3750a5efd3433ffe19c5d435a5cd77d3a4 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 15:48:34 +0000 Subject: [PATCH 149/264] add TODO --- lm_eval/prompts/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index e8ba6b42..5c91e090 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -1,3 +1,5 @@ +import ast + from lm_eval import utils from lm_eval.logger import eval_logger @@ -63,6 +65,12 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwa else: prompts = DatasetTemplates(dataset_name=dataset_name, subset_name=subset_name) - category_name, prompt_name = use_prompt.split(":") + # TODO allow to multiple prompt naming + # category_name, *prompt_name = use_prompt.split(":") + # if len(prompt_name) > 1: + # prompt_list = [] + # for prompt in prompt_name: + # prompt_list.append(utils.pattern_match(prompt_name, prompts.all_template_names)) + # else: prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) return [":".join([category_name, prompt]) for prompt in prompt_list] -- GitLab From 973d563ae65b4c472f7a04af2c4e5ad6bd7ce133 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 15:49:36 +0000 Subject: [PATCH 150/264] add TODO --- lm_eval/prompts/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index 5c91e090..d1048024 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -65,8 +65,8 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwa else: prompts = DatasetTemplates(dataset_name=dataset_name, subset_name=subset_name) + category_name, *prompt_name = use_prompt.split(":") # TODO allow to multiple prompt naming - # category_name, *prompt_name = use_prompt.split(":") # if len(prompt_name) > 1: # prompt_list = [] # for prompt in prompt_name: -- GitLab From 993baaa6e6ff26a99c231a5c3aa6bf675befb758 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 12 Sep 2023 15:56:43 +0000 Subject: [PATCH 151/264] remove comments --- lm_eval/evaluator.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index f52a5bd5..752b235f 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -115,23 +115,6 @@ def simple_evaluate( + "_rank" + str(lm.rank) + ".db", ) - # def _change_fewshot(task_dict): - # for task_name in task_dict.keys(): - # task_obj = task_dict[task_name] - # if type(task_obj) == tuple: - # group, task_obj = task_obj - # if task_obj - - # config = task_obj._config - # if num_fewshot is not None: - # if config["num_fewshot"] > 0: - # default_num_fewshot = config["num_fewshot"] - # eval_logger.warning( - # f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}" - # ) - - # task_obj._config["num_fewshot"] = num_fewshot - task_dict = lm_eval.tasks.get_task_dict(tasks) for task_name in task_dict.keys(): task_obj = task_dict[task_name] -- GitLab From 6414d55b9d8f04c1c78e2d6f3f115d58f1de28b3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 10:10:10 +0000 Subject: [PATCH 152/264] fixed warning --- lm_eval/api/task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/lm_eval/api/task.py b/lm_eval/api/task.py index c0a0d36f..687d4263 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -671,9 +671,9 @@ class ConfigurableTask(Task): check_choices = [test_target] for choice in check_choices: - choice_has_whitespace = True if " " in choice else False + choice_has_whitespace = True if choice.startswith(" ") or choice.endswith(" ") else False delimiter_has_whitespace = ( - True if " " in self._config.target_delimiter else False + True if (self._config.target_delimiter.startswith(" ") or self._config.target_delimiter.endswith(" ")) else False ) if delimiter_has_whitespace and choice_has_whitespace: -- GitLab From 91d8484012b7ef1bbfd55ab9907717c42e0d444d Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 10:10:24 +0000 Subject: [PATCH 153/264] cleaner process --- lm_eval/evaluator.py | 141 ++++++++++++++++++++++++------------------- 1 file changed, 80 insertions(+), 61 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 752b235f..f057133a 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -213,28 +213,24 @@ def evaluate( requests = collections.defaultdict(list) # Stores task scores based on task grouping. results_agg = collections.defaultdict(dict) + groups_agg = collections.defaultdict(dict) # tracks if a task was chosen via user selecting a group containing it - task_to_group = collections.defaultdict(dict) - group_to_task = collections.defaultdict(list) # stores the amount to pad out reqs per req. type so that # number of fwd passes per distributed rank is equal padding_requests = collections.defaultdict(int) + task_hierarchy = collections.defaultdict(list) + task_order = collections.defaultdict(int) # get lists of each type of request for task_name, task in task_dict.items(): if type(task) == tuple: - group, task = task - task_to_group[task_name] = group - - if group in list(group_to_task.keys()): - group_to_task[group].append(task_name) - else: - group_to_task[group] = [task_name] - - if task is None: - continue + group_name, task = task + task_hierarchy[group_name].append(task_name) else: - group_to_task[task_name] = [] + task_hierarchy[task_name] = [] + + if task is None: + continue versions[task_name] = task.VERSION configs[task_name] = dict(task.dump_config()) @@ -413,10 +409,26 @@ def evaluate( # aggregate results ; run bootstrap CIs for (task_name, key, metric), items in vals.items(): task = task_dict[task_name] + metric_key = metric + "," + key + if type(task) == tuple: - group, task = task + group_name, task = task + else: + group_name = None + task_score = task.aggregation()[metric](items) - results[task_name][metric + "," + key] = task_score + + if group_name is not None: + sample_metric_key = metric + "(sample avg)," + key + task_metric_key = metric + "(task avg)," + key + if task_metric_key in results[group_name]: + results[group_name][task_metric_key].append(task_score) + results[group_name][sample_metric_key].extend(items) + else: + results[group_name][task_metric_key] = [task_score] + results[group_name][sample_metric_key] = items + + results[task_name][metric_key] = task_score # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap # so we run them less iterations. 
still looking for a cleaner way to do this @@ -431,60 +443,67 @@ def evaluate( if stderr is not None: results[task_name][metric + "_stderr" + "," + key] = stderr(items) - tab_dict = {} - for group in group_to_task: - task_list = group_to_task[group] - if group not in tab_dict: - tab_dict[group] = 0 - - for task in task_list: - if task in tab_dict: - tab_dict[task] += 1 + # zero_order_groups = [group for group in task_hierarchy if task_hierarchy[group] == 0] + + # for task_name, task in task_dict.items(): + # if type(task) == tuple: + # group_name, _ = task + # else: + # group_name = None + + # scores = results[task_name] + # if group_name is not None: + # group_name = tab_dict[group_name] * "-" + group_name + # if group_name not in results_agg: + # results_agg[group_name] = {} + + # for metric in scores: + # if metric in results_agg[group_name]: + # results_agg[group_name][metric].append(scores[metric]) + # else: + # results_agg[group_name][metric] = [scores[metric]] + + # tab_task_name = tab_dict[task_name] * "-" + task_name + # results_agg[tab_task_name] = scores + # versions[tab_task_name] = versions[task_name] + + # if bool(results_agg): + # for group in results_agg.keys(): + # for metric in results_agg[group].keys(): + # results_agg[group][metric] = np.average(results_agg[group][metric]) + # versions[group] = "N/A" + + if bool(results): + for task_or_group in results.keys(): + for metric in results[task_or_group].keys(): + if type(results[task_or_group][metric]) == list: + results[task_or_group][metric] = np.average(results[task_or_group][metric]) + versions[task_or_group] = "N/A" + + for group in task_hierarchy.keys(): + if group not in task_order: + task_order[group] = 0 + + for task in task_hierarchy[group]: + if task in task_order: + task_order[task] += 1 else: - tab_dict[task] = 1 + tab_dict[group] - zero_order_groups = [group for group in tab_dict if tab_dict[group] == 0] + task_order[task] = 1 + task_order[group] - for task_name, task in task_dict.items(): - if type(task) == tuple: - group_name, _ = task - else: - group_name = None - - scores = results[task_name] - if group_name is not None: - group_name = tab_dict[group_name] * "-" + group_name - if group_name not in results_agg: - results_agg[group_name] = {} - - for metric in scores: - if metric in results_agg[group_name]: - results_agg[group_name][metric].append(scores[metric]) - else: - results_agg[group_name][metric] = [scores[metric]] - - tab_task_name = tab_dict[task_name] * "-" + task_name - results_agg[tab_task_name] = scores - versions[tab_task_name] = versions[task_name] - - if bool(results_agg): - for group in results_agg.keys(): - for metric in results_agg[group].keys(): - results_agg[group][metric] = np.average(results_agg[group][metric]) - versions[group] = "N/A" + for task_or_group, order in task_order.items(): + tabbed_name = ">"*order+task_or_group + results_agg[tabbed_name] = results[task_or_group] + versions[tabbed_name] = versions[task_or_group] + if (order == 0) and len(task_hierarchy[task_or_group]) > 0: + groups_agg[task_or_group] = results[task_or_group] results_dict = { "results": dict(results_agg.items()), **( { - "groups": dict( - [ - item - for item in results_agg.items() - if item[0] in zero_order_groups - ] - ) + "groups": dict(groups_agg.items()) } - if len(zero_order_groups) > 0 + if bool(groups_agg) else {} ), "configs": dict(sorted(configs.items())), -- GitLab From e1e05b19ed6eab82fea943e5090ccc6b22377ec3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 10:11:11 +0000 
Subject: [PATCH 154/264] cleaner process --- lm_eval/evaluator.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index f057133a..ab1c2dfb 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -480,6 +480,9 @@ def evaluate( results[task_or_group][metric] = np.average(results[task_or_group][metric]) versions[task_or_group] = "N/A" + print("task_hierarchy") + print(task_hierarchy) + print("--") for group in task_hierarchy.keys(): if group not in task_order: task_order[group] = 0 @@ -490,6 +493,9 @@ def evaluate( else: task_order[task] = 1 + task_order[group] + print("task_order") + print(task_order) + print("--") for task_or_group, order in task_order.items(): tabbed_name = ">"*order+task_or_group results_agg[tabbed_name] = results[task_or_group] -- GitLab From d4f62844742262ac1b4470d433b9915b32ff88d1 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 10:35:11 +0000 Subject: [PATCH 155/264] better presentation --- lm_eval/evaluator.py | 60 +++++++++++++------------------------------- 1 file changed, 18 insertions(+), 42 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index ab1c2dfb..65f33c73 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -443,46 +443,17 @@ def evaluate( if stderr is not None: results[task_name][metric + "_stderr" + "," + key] = stderr(items) - # zero_order_groups = [group for group in task_hierarchy if task_hierarchy[group] == 0] - - # for task_name, task in task_dict.items(): - # if type(task) == tuple: - # group_name, _ = task - # else: - # group_name = None - - # scores = results[task_name] - # if group_name is not None: - # group_name = tab_dict[group_name] * "-" + group_name - # if group_name not in results_agg: - # results_agg[group_name] = {} - - # for metric in scores: - # if metric in results_agg[group_name]: - # results_agg[group_name][metric].append(scores[metric]) - # else: - # results_agg[group_name][metric] = [scores[metric]] - - # tab_task_name = tab_dict[task_name] * "-" + task_name - # results_agg[tab_task_name] = scores - # versions[tab_task_name] = versions[task_name] - - # if bool(results_agg): - # for group in results_agg.keys(): - # for metric in results_agg[group].keys(): - # results_agg[group][metric] = np.average(results_agg[group][metric]) - # versions[group] = "N/A" - if bool(results): for task_or_group in results.keys(): for metric in results[task_or_group].keys(): + try: + print(task_or_group, metric, len(results[task_or_group][metric])) + except: + pass if type(results[task_or_group][metric]) == list: results[task_or_group][metric] = np.average(results[task_or_group][metric]) versions[task_or_group] = "N/A" - print("task_hierarchy") - print(task_hierarchy) - print("--") for group in task_hierarchy.keys(): if group not in task_order: task_order[group] = 0 @@ -493,15 +464,20 @@ def evaluate( else: task_order[task] = 1 + task_order[group] - print("task_order") - print(task_order) - print("--") - for task_or_group, order in task_order.items(): - tabbed_name = ">"*order+task_or_group - results_agg[tabbed_name] = results[task_or_group] - versions[tabbed_name] = versions[task_or_group] - if (order == 0) and len(task_hierarchy[task_or_group]) > 0: - groups_agg[task_or_group] = results[task_or_group] + for task_name, task in task_dict.items(): + if type(task) == tuple: + group_name, task = task + order = task_order[group_name] + tabbed_name = "-"*order+group_name + results_agg[tabbed_name] = results[group_name] + versions[tabbed_name] = 
versions[group_name] + if order == 0: + groups_agg[group_name] = results[group_name] + + order = task_order[task_name] + tabbed_name = "-"*order+task_name + results_agg[tabbed_name] = results[task_name] + versions[tabbed_name] = versions[task_name] results_dict = { "results": dict(results_agg.items()), -- GitLab From d2804132e2d73855ca139b3ade44bc7819664c19 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 13:07:37 +0000 Subject: [PATCH 156/264] aggregates across tasks within the same group in addition to average accross task average --- lm_eval/evaluator.py | 78 ++++++++++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 25 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 65f33c73..515cfd11 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -211,15 +211,19 @@ def evaluate( samples = collections.defaultdict(list) # tracks all Instances/requests a model must generate output on. requests = collections.defaultdict(list) - # Stores task scores based on task grouping. + # Aggregated task scores presented with groups results_agg = collections.defaultdict(dict) + # Aggregated groups scores only groups_agg = collections.defaultdict(dict) - # tracks if a task was chosen via user selecting a group containing it # stores the amount to pad out reqs per req. type so that # number of fwd passes per distributed rank is equal padding_requests = collections.defaultdict(int) + task_hierarchy = collections.defaultdict(list) + task_order = collections.defaultdict(int) + + sample_agg_fn = collections.defaultdict(dict) # get lists of each type of request for task_name, task in task_dict.items(): @@ -405,6 +409,35 @@ def evaluate( vals = vals_torch if lm.rank == 0: + + ### Get task ordering for correct sample-wide aggregation + group_to_task = {} + for group in task_hierarchy.keys(): + if group not in task_order: + task_order[group] = 0 + + if len(task_hierarchy[group]) > 0: + group_to_task[group] = task_hierarchy[group].copy() + + for task in task_hierarchy[group]: + + if task in task_order: + task_order[task] += 1 + else: + task_order[task] = 1 + task_order[group] + + if task in task_hierarchy: + group_to_task[group].remove(task) + group_to_task[group].extend(task_hierarchy[task]) + + task_to_group = {} + for group in group_to_task: + for task in group_to_task[group]: + if task in task_to_group: + task_to_group[task].append(group) + else: + task_to_group[task] = [group] + ### Aggregate results over all datapoints ### # aggregate results ; run bootstrap CIs for (task_name, key, metric), items in vals.items(): @@ -416,17 +449,22 @@ def evaluate( else: group_name = None - task_score = task.aggregation()[metric](items) + agg_fn = task.aggregation()[metric] + task_score = agg_fn(items) if group_name is not None: - sample_metric_key = metric + "(sample avg)," + key - task_metric_key = metric + "(task avg)," + key - if task_metric_key in results[group_name]: - results[group_name][task_metric_key].append(task_score) - results[group_name][sample_metric_key].extend(items) - else: - results[group_name][task_metric_key] = [task_score] - results[group_name][sample_metric_key] = items + sample_metric_key = metric + "(sample agg)," + key + for grouping in task_to_group[task_name]: + if metric_key in results[grouping]: + results[grouping][metric_key].append(task_score) + else: + results[grouping][metric_key] = [task_score] + + if sample_metric_key in results[grouping]: + results[grouping][sample_metric_key] += items + else: + 
results[grouping][sample_metric_key] = items.copy() + sample_agg_fn[grouping][sample_metric_key] = agg_fn results[task_name][metric_key] = task_score @@ -446,23 +484,13 @@ def evaluate( if bool(results): for task_or_group in results.keys(): for metric in results[task_or_group].keys(): - try: - print(task_or_group, metric, len(results[task_or_group][metric])) - except: - pass if type(results[task_or_group][metric]) == list: - results[task_or_group][metric] = np.average(results[task_or_group][metric]) + if "(sample agg)" in metric: + results[task_or_group][metric] = sample_agg_fn[task_or_group][metric](results[task_or_group][metric]) + else: + results[task_or_group][metric] = np.average(results[task_or_group][metric]) versions[task_or_group] = "N/A" - for group in task_hierarchy.keys(): - if group not in task_order: - task_order[group] = 0 - - for task in task_hierarchy[group]: - if task in task_order: - task_order[task] += 1 - else: - task_order[task] = 1 + task_order[group] for task_name, task in task_dict.items(): if type(task) == tuple: -- GitLab From 605787a9658e1acb5ff3f01900723c5bd360ca7b Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 14:31:10 +0000 Subject: [PATCH 157/264] add bigbench --- lm_eval/tasks/bigbench/README.md | 49 +++++ lm_eval/tasks/bigbench/generate_tasks.py | 198 ++++++++++++++++++ .../abstract_narrative_understanding.yaml | 4 + .../bigbench/greedy_until/anachronisms.yaml | 4 + .../greedy_until/analogical_similarity.yaml | 4 + .../greedy_until/analytic_entailment.yaml | 4 + .../bigbench/greedy_until/arithmetic.yaml | 4 + .../greedy_until/ascii_word_recognition.yaml | 4 + .../greedy_until/authorship_verification.yaml | 4 + .../greedy_until/auto_categorization.yaml | 4 + .../bigbench/greedy_until/auto_debugging.yaml | 4 + .../bigbench/greedy_until/bbq_lite_json.yaml | 4 + .../bridging_anaphora_resolution_barqa.yaml | 4 + .../greedy_until/causal_judgment.yaml | 4 + .../greedy_until/cause_and_effect.yaml | 4 + .../greedy_until/checkmate_in_one.yaml | 4 + .../greedy_until/chess_state_tracking.yaml | 4 + .../chinese_remainder_theorem.yaml | 4 + .../greedy_until/cifar10_classification.yaml | 4 + .../greedy_until/code_line_description.yaml | 4 + .../bigbench/greedy_until/codenames.yaml | 4 + .../tasks/bigbench/greedy_until/color.yaml | 4 + .../greedy_until/common_morpheme.yaml | 4 + .../greedy_until/conceptual_combinations.yaml | 4 + .../greedy_until/conlang_translation.yaml | 4 + ...extual_parametric_knowledge_conflicts.yaml | 4 + .../bigbench/greedy_until/crash_blossom.yaml | 4 + .../tasks/bigbench/greedy_until/crass_ai.yaml | 4 + .../greedy_until/cryobiology_spanish.yaml | 4 + .../bigbench/greedy_until/cryptonite.yaml | 4 + .../bigbench/greedy_until/cs_algorithms.yaml | 4 + .../greedy_until/dark_humor_detection.yaml | 4 + .../greedy_until/date_understanding.yaml | 4 + .../greedy_until/disambiguation_qa.yaml | 4 + .../discourse_marker_prediction.yaml | 4 + .../tasks/bigbench/greedy_until/disfl_qa.yaml | 4 + .../bigbench/greedy_until/dyck_languages.yaml | 4 + .../greedy_until/elementary_math_qa.yaml | 4 + .../bigbench/greedy_until/emoji_movie.yaml | 4 + .../emojis_emotion_prediction.yaml | 4 + .../greedy_until/empirical_judgments.yaml | 4 + .../greedy_until/english_proverbs.yaml | 4 + .../english_russian_proverbs.yaml | 4 + .../greedy_until/entailed_polarity.yaml | 4 + .../greedy_until/entailed_polarity_hindi.yaml | 4 + .../greedy_until/epistemic_reasoning.yaml | 4 + .../evaluating_information_essentiality.yaml | 4 + 
.../bigbench/greedy_until/fact_checker.yaml | 4 + .../greedy_until/fantasy_reasoning.yaml | 4 + .../bigbench/greedy_until/few_shot_nlg.yaml | 4 + .../figure_of_speech_detection.yaml | 4 + .../formal_fallacies_syllogisms_negation.yaml | 4 + lm_eval/tasks/bigbench/greedy_until/gem.yaml | 4 + .../gender_inclusive_sentences_german.yaml | 4 + .../greedy_until/general_knowledge.yaml | 4 + .../greedy_until/geometric_shapes.yaml | 4 + .../greedy_until/goal_step_wikihow.yaml | 4 + .../gre_reading_comprehension.yaml | 4 + .../bigbench/greedy_until/hhh_alignment.yaml | 4 + .../hindi_question_answering.yaml | 4 + .../greedy_until/hindu_knowledge.yaml | 4 + .../greedy_until/hinglish_toxicity.yaml | 4 + .../greedy_until/human_organs_senses.yaml | 4 + .../bigbench/greedy_until/hyperbaton.yaml | 4 + .../greedy_until/identify_math_theorems.yaml | 4 + .../greedy_until/identify_odd_metaphor.yaml | 4 + .../bigbench/greedy_until/implicatures.yaml | 4 + .../greedy_until/implicit_relations.yaml | 4 + .../greedy_until/intent_recognition.yaml | 4 + .../international_phonetic_alphabet_nli.yaml | 4 + ...ional_phonetic_alphabet_transliterate.yaml | 4 + .../greedy_until/intersect_geometry.yaml | 4 + .../greedy_until/irony_identification.yaml | 4 + .../bigbench/greedy_until/kanji_ascii.yaml | 4 + .../tasks/bigbench/greedy_until/kannada.yaml | 4 + .../bigbench/greedy_until/key_value_maps.yaml | 4 + .../bigbench/greedy_until/known_unknowns.yaml | 4 + .../bigbench/greedy_until/language_games.yaml | 4 + .../greedy_until/language_identification.yaml | 4 + .../greedy_until/linguistic_mappings.yaml | 4 + .../greedy_until/linguistics_puzzles.yaml | 4 + .../bigbench/greedy_until/list_functions.yaml | 4 + .../greedy_until/logic_grid_puzzle.yaml | 4 + .../bigbench/greedy_until/logical_args.yaml | 4 + .../greedy_until/logical_deduction.yaml | 4 + .../logical_fallacy_detection.yaml | 4 + .../greedy_until/logical_sequence.yaml | 4 + .../greedy_until/mathematical_induction.yaml | 4 + .../bigbench/greedy_until/matrixshapes.yaml | 4 + .../greedy_until/metaphor_boolean.yaml | 4 + .../greedy_until/metaphor_understanding.yaml | 4 + .../greedy_until/minute_mysteries_qa.yaml | 4 + .../bigbench/greedy_until/misconceptions.yaml | 4 + .../greedy_until/misconceptions_russian.yaml | 4 + .../bigbench/greedy_until/mnist_ascii.yaml | 4 + .../greedy_until/modified_arithmetic.yaml | 4 + .../greedy_until/moral_permissibility.yaml | 4 + .../movie_dialog_same_or_different.yaml | 4 + .../greedy_until/movie_recommendation.yaml | 4 + .../greedy_until/mult_data_wrangling.yaml | 4 + .../tasks/bigbench/greedy_until/multiemo.yaml | 4 + .../greedy_until/natural_instructions.yaml | 4 + .../tasks/bigbench/greedy_until/navigate.yaml | 4 + .../greedy_until/nonsense_words_grammar.yaml | 4 + .../bigbench/greedy_until/novel_concepts.yaml | 4 + .../greedy_until/object_counting.yaml | 4 + .../bigbench/greedy_until/odd_one_out.yaml | 4 + .../bigbench/greedy_until/operators.yaml | 4 + .../greedy_until/paragraph_segmentation.yaml | 4 + .../bigbench/greedy_until/parsinlu_qa.yaml | 4 + .../parsinlu_reading_comprehension.yaml | 4 + .../greedy_until/penguins_in_a_table.yaml | 4 + .../greedy_until/periodic_elements.yaml | 4 + .../bigbench/greedy_until/persian_idioms.yaml | 4 + .../greedy_until/phrase_relatedness.yaml | 4 + .../greedy_until/physical_intuition.yaml | 4 + .../tasks/bigbench/greedy_until/physics.yaml | 4 + .../greedy_until/physics_questions.yaml | 4 + .../play_dialog_same_or_different.yaml | 4 + .../polish_sequence_labeling.yaml | 4 + 
.../greedy_until/presuppositions_as_nli.yaml | 4 + .../bigbench/greedy_until/qa_wikidata.yaml | 4 + .../greedy_until/question_selection.yaml | 4 + .../greedy_until/real_or_fake_text.yaml | 4 + .../reasoning_about_colored_objects.yaml | 4 + .../greedy_until/repeat_copy_logic.yaml | 4 + .../tasks/bigbench/greedy_until/rephrase.yaml | 4 + .../bigbench/greedy_until/riddle_sense.yaml | 4 + .../bigbench/greedy_until/ruin_names.yaml | 4 + .../salient_translation_error_detection.yaml | 4 + .../scientific_press_release.yaml | 4 + .../semantic_parsing_in_context_sparc.yaml | 4 + .../greedy_until/semantic_parsing_spider.yaml | 4 + .../greedy_until/sentence_ambiguity.yaml | 4 + .../similarities_abstraction.yaml | 4 + .../greedy_until/simp_turing_concept.yaml | 4 + .../greedy_until/simple_arithmetic_json.yaml | 4 + ...imple_arithmetic_json_multiple_choice.yaml | 4 + .../simple_arithmetic_json_subtasks.yaml | 4 + ...mple_arithmetic_multiple_targets_json.yaml | 4 + .../simple_ethical_questions.yaml | 4 + .../greedy_until/simple_text_editing.yaml | 4 + .../tasks/bigbench/greedy_until/snarks.yaml | 4 + .../bigbench/greedy_until/social_iqa.yaml | 4 + .../bigbench/greedy_until/social_support.yaml | 4 + .../greedy_until/sports_understanding.yaml | 4 + .../greedy_until/strange_stories.yaml | 4 + .../bigbench/greedy_until/strategyqa.yaml | 4 + .../greedy_until/sufficient_information.yaml | 4 + .../bigbench/greedy_until/suicide_risk.yaml | 4 + .../swahili_english_proverbs.yaml | 4 + .../swedish_to_german_proverbs.yaml | 4 + .../greedy_until/symbol_interpretation.yaml | 4 + .../greedy_until/temporal_sequences.yaml | 4 + .../tasks/bigbench/greedy_until/tense.yaml | 4 + .../tasks/bigbench/greedy_until/timedial.yaml | 4 + .../bigbench/greedy_until/topical_chat.yaml | 4 + .../tracking_shuffled_objects.yaml | 4 + .../greedy_until/understanding_fables.yaml | 4 + .../greedy_until/undo_permutation.yaml | 4 + .../greedy_until/unit_conversion.yaml | 4 + .../greedy_until/unit_interpretation.yaml | 4 + .../unnatural_in_context_learning.yaml | 4 + .../vitaminc_fact_verification.yaml | 4 + .../greedy_until/what_is_the_tao.yaml | 4 + .../greedy_until/which_wiki_edit.yaml | 4 + .../tasks/bigbench/greedy_until/winowhy.yaml | 4 + .../bigbench/greedy_until/word_sorting.yaml | 4 + .../greedy_until/word_unscrambling.yaml | 4 + .../tasks/bigbench/greedy_until_template_yaml | 14 ++ .../abstract_narrative_understanding.yaml | 4 + .../multiple_choice/anachronisms.yaml | 4 + .../analogical_similarity.yaml | 4 + .../multiple_choice/analytic_entailment.yaml | 4 + .../bigbench/multiple_choice/arithmetic.yaml | 4 + .../ascii_word_recognition.yaml | 4 + .../authorship_verification.yaml | 4 + .../multiple_choice/auto_categorization.yaml | 4 + .../multiple_choice/auto_debugging.yaml | 4 + .../multiple_choice/bbq_lite_json.yaml | 4 + .../bridging_anaphora_resolution_barqa.yaml | 4 + .../multiple_choice/causal_judgment.yaml | 4 + .../multiple_choice/cause_and_effect.yaml | 4 + .../multiple_choice/checkmate_in_one.yaml | 4 + .../multiple_choice/chess_state_tracking.yaml | 4 + .../chinese_remainder_theorem.yaml | 4 + .../cifar10_classification.yaml | 4 + .../code_line_description.yaml | 4 + .../bigbench/multiple_choice/codenames.yaml | 4 + .../tasks/bigbench/multiple_choice/color.yaml | 4 + .../multiple_choice/common_morpheme.yaml | 4 + .../conceptual_combinations.yaml | 4 + .../multiple_choice/conlang_translation.yaml | 4 + ...extual_parametric_knowledge_conflicts.yaml | 4 + .../multiple_choice/crash_blossom.yaml | 4 + 
.../bigbench/multiple_choice/crass_ai.yaml | 4 + .../multiple_choice/cryobiology_spanish.yaml | 4 + .../bigbench/multiple_choice/cryptonite.yaml | 4 + .../multiple_choice/cs_algorithms.yaml | 4 + .../multiple_choice/dark_humor_detection.yaml | 4 + .../multiple_choice/date_understanding.yaml | 4 + .../multiple_choice/disambiguation_qa.yaml | 4 + .../discourse_marker_prediction.yaml | 4 + .../bigbench/multiple_choice/disfl_qa.yaml | 4 + .../multiple_choice/dyck_languages.yaml | 4 + .../multiple_choice/elementary_math_qa.yaml | 4 + .../bigbench/multiple_choice/emoji_movie.yaml | 4 + .../emojis_emotion_prediction.yaml | 4 + .../multiple_choice/empirical_judgments.yaml | 4 + .../multiple_choice/english_proverbs.yaml | 4 + .../english_russian_proverbs.yaml | 4 + .../multiple_choice/entailed_polarity.yaml | 4 + .../entailed_polarity_hindi.yaml | 4 + .../multiple_choice/epistemic_reasoning.yaml | 4 + .../evaluating_information_essentiality.yaml | 4 + .../multiple_choice/fact_checker.yaml | 4 + .../multiple_choice/fantasy_reasoning.yaml | 4 + .../multiple_choice/few_shot_nlg.yaml | 4 + .../figure_of_speech_detection.yaml | 4 + .../formal_fallacies_syllogisms_negation.yaml | 4 + .../tasks/bigbench/multiple_choice/gem.yaml | 4 + .../gender_inclusive_sentences_german.yaml | 4 + .../multiple_choice/general_knowledge.yaml | 4 + .../multiple_choice/geometric_shapes.yaml | 4 + .../multiple_choice/goal_step_wikihow.yaml | 4 + .../gre_reading_comprehension.yaml | 4 + .../multiple_choice/hhh_alignment.yaml | 4 + .../hindi_question_answering.yaml | 4 + .../multiple_choice/hindu_knowledge.yaml | 4 + .../multiple_choice/hinglish_toxicity.yaml | 4 + .../multiple_choice/human_organs_senses.yaml | 4 + .../bigbench/multiple_choice/hyperbaton.yaml | 4 + .../identify_math_theorems.yaml | 4 + .../identify_odd_metaphor.yaml | 4 + .../multiple_choice/implicatures.yaml | 4 + .../multiple_choice/implicit_relations.yaml | 4 + .../multiple_choice/intent_recognition.yaml | 4 + .../international_phonetic_alphabet_nli.yaml | 4 + ...ional_phonetic_alphabet_transliterate.yaml | 4 + .../multiple_choice/intersect_geometry.yaml | 4 + .../multiple_choice/irony_identification.yaml | 4 + .../bigbench/multiple_choice/kanji_ascii.yaml | 4 + .../bigbench/multiple_choice/kannada.yaml | 4 + .../multiple_choice/key_value_maps.yaml | 4 + .../multiple_choice/known_unknowns.yaml | 4 + .../multiple_choice/language_games.yaml | 4 + .../language_identification.yaml | 4 + .../multiple_choice/linguistic_mappings.yaml | 4 + .../multiple_choice/linguistics_puzzles.yaml | 4 + .../multiple_choice/list_functions.yaml | 4 + .../multiple_choice/logic_grid_puzzle.yaml | 4 + .../multiple_choice/logical_args.yaml | 4 + .../multiple_choice/logical_deduction.yaml | 4 + .../logical_fallacy_detection.yaml | 4 + .../multiple_choice/logical_sequence.yaml | 4 + .../mathematical_induction.yaml | 4 + .../multiple_choice/matrixshapes.yaml | 4 + .../multiple_choice/metaphor_boolean.yaml | 4 + .../metaphor_understanding.yaml | 4 + .../multiple_choice/minute_mysteries_qa.yaml | 4 + .../multiple_choice/misconceptions.yaml | 4 + .../misconceptions_russian.yaml | 4 + .../bigbench/multiple_choice/mnist_ascii.yaml | 4 + .../multiple_choice/modified_arithmetic.yaml | 4 + .../multiple_choice/moral_permissibility.yaml | 4 + .../movie_dialog_same_or_different.yaml | 4 + .../multiple_choice/movie_recommendation.yaml | 4 + .../multiple_choice/mult_data_wrangling.yaml | 4 + .../bigbench/multiple_choice/multiemo.yaml | 4 + .../multiple_choice/natural_instructions.yaml | 4 + 
.../bigbench/multiple_choice/navigate.yaml | 4 + .../nonsense_words_grammar.yaml | 4 + .../multiple_choice/novel_concepts.yaml | 4 + .../multiple_choice/object_counting.yaml | 4 + .../bigbench/multiple_choice/odd_one_out.yaml | 4 + .../bigbench/multiple_choice/operators.yaml | 4 + .../paragraph_segmentation.yaml | 4 + .../bigbench/multiple_choice/parsinlu_qa.yaml | 4 + .../parsinlu_reading_comprehension.yaml | 4 + .../multiple_choice/penguins_in_a_table.yaml | 4 + .../multiple_choice/periodic_elements.yaml | 4 + .../multiple_choice/persian_idioms.yaml | 4 + .../multiple_choice/phrase_relatedness.yaml | 4 + .../multiple_choice/physical_intuition.yaml | 4 + .../bigbench/multiple_choice/physics.yaml | 4 + .../multiple_choice/physics_questions.yaml | 4 + .../play_dialog_same_or_different.yaml | 4 + .../polish_sequence_labeling.yaml | 4 + .../presuppositions_as_nli.yaml | 4 + .../bigbench/multiple_choice/qa_wikidata.yaml | 4 + .../multiple_choice/question_selection.yaml | 4 + .../multiple_choice/real_or_fake_text.yaml | 4 + .../reasoning_about_colored_objects.yaml | 4 + .../multiple_choice/repeat_copy_logic.yaml | 4 + .../bigbench/multiple_choice/rephrase.yaml | 4 + .../multiple_choice/riddle_sense.yaml | 4 + .../bigbench/multiple_choice/ruin_names.yaml | 4 + .../salient_translation_error_detection.yaml | 4 + .../scientific_press_release.yaml | 4 + .../semantic_parsing_in_context_sparc.yaml | 4 + .../semantic_parsing_spider.yaml | 4 + .../multiple_choice/sentence_ambiguity.yaml | 4 + .../similarities_abstraction.yaml | 4 + .../multiple_choice/simp_turing_concept.yaml | 4 + .../simple_arithmetic_json.yaml | 4 + ...imple_arithmetic_json_multiple_choice.yaml | 4 + .../simple_arithmetic_json_subtasks.yaml | 4 + ...mple_arithmetic_multiple_targets_json.yaml | 4 + .../simple_ethical_questions.yaml | 4 + .../multiple_choice/simple_text_editing.yaml | 4 + .../bigbench/multiple_choice/snarks.yaml | 4 + .../bigbench/multiple_choice/social_iqa.yaml | 4 + .../multiple_choice/social_support.yaml | 4 + .../multiple_choice/sports_understanding.yaml | 4 + .../multiple_choice/strange_stories.yaml | 4 + .../bigbench/multiple_choice/strategyqa.yaml | 4 + .../sufficient_information.yaml | 4 + .../multiple_choice/suicide_risk.yaml | 4 + .../swahili_english_proverbs.yaml | 4 + .../swedish_to_german_proverbs.yaml | 4 + .../symbol_interpretation.yaml | 4 + .../multiple_choice/temporal_sequences.yaml | 4 + .../tasks/bigbench/multiple_choice/tense.yaml | 4 + .../bigbench/multiple_choice/timedial.yaml | 4 + .../multiple_choice/topical_chat.yaml | 4 + .../tracking_shuffled_objects.yaml | 4 + .../multiple_choice/understanding_fables.yaml | 4 + .../multiple_choice/undo_permutation.yaml | 4 + .../multiple_choice/unit_conversion.yaml | 4 + .../multiple_choice/unit_interpretation.yaml | 4 + .../unnatural_in_context_learning.yaml | 4 + .../vitaminc_fact_verification.yaml | 4 + .../multiple_choice/what_is_the_tao.yaml | 4 + .../multiple_choice/which_wiki_edit.yaml | 4 + .../bigbench/multiple_choice/winowhy.yaml | 4 + .../multiple_choice/word_sorting.yaml | 4 + .../multiple_choice/word_unscrambling.yaml | 4 + .../bigbench/multiple_choice_template_yaml | 10 + 338 files changed, 1607 insertions(+) create mode 100644 lm_eval/tasks/bigbench/README.md create mode 100644 lm_eval/tasks/bigbench/generate_tasks.py create mode 100644 lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml create mode 100644 
lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/codenames.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/color.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml 
create mode 100644 lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/gem.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/implicatures.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/kannada.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/language_games.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/language_identification.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/list_functions.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/logical_args.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml create mode 100644 
lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/multiemo.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/navigate.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/object_counting.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/operators.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/physics.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/question_selection.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/rephrase.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml create mode 100644 
lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/snarks.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/social_support.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/tense.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/timedial.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/winowhy.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml create mode 100644 lm_eval/tasks/bigbench/greedy_until_template_yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml create mode 100644 
lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/codenames.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/color.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/gem.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml create mode 100644 
lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/kannada.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/language_games.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml create mode 100644 
lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/navigate.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/operators.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/physics.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml create mode 100644 
lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/snarks.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/social_support.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/tense.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/timedial.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml create mode 100644 lm_eval/tasks/bigbench/multiple_choice_template_yaml diff --git a/lm_eval/tasks/bigbench/README.md b/lm_eval/tasks/bigbench/README.md new file mode 100644 index 00000000..bfb7d457 --- /dev/null +++ b/lm_eval/tasks/bigbench/README.md @@ -0,0 +1,49 @@ +# BigBench + +### Paper + +Title: `Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models` + +Abstract: https://arxiv.org/abs/2206.04615 + +The Beyond the Imitation Game Benchmark (BIG-bench) is a collaborative benchmark intended to probe large language models and extrapolate their future capabilities. + +Homepage: https://github.com/google/BIG-bench + + +### Citation + +``` +@misc{srivastava2022imitation, + title={Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models}, + author={Aarohi Srivastava and Abhinav Rastogi and Abhishek Rao and Abu Awal Md Shoeb and Abubakar Abid and Adam Fisch and Adam R. Brown and Adam Santoro and Aditya Gupta and Adrià Garriga-Alonso and Agnieszka Kluska and Aitor Lewkowycz and Akshat Agarwal and Alethea Power and Alex Ray and Alex Warstadt and Alexander W. 
Kocurek and Ali Safaya and Ali Tazarv and Alice Xiang and Alicia Parrish and Allen Nie and Aman Hussain and Amanda Askell and Amanda Dsouza and Ambrose Slone and Ameet Rahane and Anantharaman S. Iyer and Anders Andreassen and Andrea Madotto and Andrea Santilli and Andreas Stuhlmüller and Andrew Dai and Andrew La and Andrew Lampinen and Andy Zou and Angela Jiang and Angelica Chen and Anh Vuong and Animesh Gupta and Anna Gottardi and Antonio Norelli and Anu Venkatesh and Arash Gholamidavoodi and Arfa Tabassum and Arul Menezes and Arun Kirubarajan and Asher Mullokandov and Ashish Sabharwal and Austin Herrick and Avia Efrat and Aykut Erdem and Ayla Karakaş and B. Ryan Roberts and Bao Sheng Loe and Barret Zoph and Bartłomiej Bojanowski and Batuhan Özyurt and Behnam Hedayatnia and Behnam Neyshabur and Benjamin Inden and Benno Stein and Berk Ekmekci and Bill Yuchen Lin and Blake Howald and Cameron Diao and Cameron Dour and Catherine Stinson and Cedrick Argueta and César Ferri Ramírez and Chandan Singh and Charles Rathkopf and Chenlin Meng and Chitta Baral and Chiyu Wu and Chris Callison-Burch and Chris Waites and Christian Voigt and Christopher D. Manning and Christopher Potts and Cindy Ramirez and Clara E. Rivera and Clemencia Siro and Colin Raffel and Courtney Ashcraft and Cristina Garbacea and Damien Sileo and Dan Garrette and Dan Hendrycks and Dan Kilman and Dan Roth and Daniel Freeman and Daniel Khashabi and Daniel Levy and Daniel Moseguí González and Danielle Perszyk and Danny Hernandez and Danqi Chen and Daphne Ippolito and Dar Gilboa and David Dohan and David Drakard and David Jurgens and Debajyoti Datta and Deep Ganguli and Denis Emelin and Denis Kleyko and Deniz Yuret and Derek Chen and Derek Tam and Dieuwke Hupkes and Diganta Misra and Dilyar Buzan and Dimitri Coelho Mollo and Diyi Yang and Dong-Ho Lee and Ekaterina Shutova and Ekin Dogus Cubuk and Elad Segal and Eleanor Hagerman and Elizabeth Barnes and Elizabeth Donoway and Ellie Pavlick and Emanuele Rodola and Emma Lam and Eric Chu and Eric Tang and Erkut Erdem and Ernie Chang and Ethan A. Chi and Ethan Dyer and Ethan Jerzak and Ethan Kim and Eunice Engefu Manyasi and Evgenii Zheltonozhskii and Fanyue Xia and Fatemeh Siar and Fernando Martínez-Plumed and Francesca Happé and Francois Chollet and Frieda Rong and Gaurav Mishra and Genta Indra Winata and Gerard de Melo and Germán Kruszewski and Giambattista Parascandolo and Giorgio Mariani and Gloria Wang and Gonzalo Jaimovitch-López and Gregor Betz and Guy Gur-Ari and Hana Galijasevic and Hannah Kim and Hannah Rashkin and Hannaneh Hajishirzi and Harsh Mehta and Hayden Bogar and Henry Shevlin and Hinrich Schütze and Hiromu Yakura and Hongming Zhang and Hugh Mee Wong and Ian Ng and Isaac Noble and Jaap Jumelet and Jack Geissinger and Jackson Kernion and Jacob Hilton and Jaehoon Lee and Jaime Fernández Fisac and James B. Simon and James Koppel and James Zheng and James Zou and Jan Kocoń and Jana Thompson and Jared Kaplan and Jarema Radom and Jascha Sohl-Dickstein and Jason Phang and Jason Wei and Jason Yosinski and Jekaterina Novikova and Jelle Bosscher and Jennifer Marsh and Jeremy Kim and Jeroen Taal and Jesse Engel and Jesujoba Alabi and Jiacheng Xu and Jiaming Song and Jillian Tang and Joan Waweru and John Burden and John Miller and John U. Balis and Jonathan Berant and Jörg Frohberg and Jos Rozen and Jose Hernandez-Orallo and Joseph Boudeman and Joseph Jones and Joshua B. Tenenbaum and Joshua S. 
Rule and Joyce Chua and Kamil Kanclerz and Karen Livescu and Karl Krauth and Karthik Gopalakrishnan and Katerina Ignatyeva and Katja Markert and Kaustubh D. Dhole and Kevin Gimpel and Kevin Omondi and Kory Mathewson and Kristen Chiafullo and Ksenia Shkaruta and Kumar Shridhar and Kyle McDonell and Kyle Richardson and Laria Reynolds and Leo Gao and Li Zhang and Liam Dugan and Lianhui Qin and Lidia Contreras-Ochando and Louis-Philippe Morency and Luca Moschella and Lucas Lam and Lucy Noble and Ludwig Schmidt and Luheng He and Luis Oliveros Colón and Luke Metz and Lütfi Kerem Şenel and Maarten Bosma and Maarten Sap and Maartje ter Hoeve and Maheen Farooqi and Manaal Faruqui and Mantas Mazeika and Marco Baturan and Marco Marelli and Marco Maru and Maria Jose Ramírez Quintana and Marie Tolkiehn and Mario Giulianelli and Martha Lewis and Martin Potthast and Matthew L. Leavitt and Matthias Hagen and Mátyás Schubert and Medina Orduna Baitemirova and Melody Arnaud and Melvin McElrath and Michael A. Yee and Michael Cohen and Michael Gu and Michael Ivanitskiy and Michael Starritt and Michael Strube and Michał Swędrowski and Michele Bevilacqua and Michihiro Yasunaga and Mihir Kale and Mike Cain and Mimee Xu and Mirac Suzgun and Mo Tiwari and Mohit Bansal and Moin Aminnaseri and Mor Geva and Mozhdeh Gheini and Mukund Varma T and Nanyun Peng and Nathan Chi and Nayeon Lee and Neta Gur-Ari Krakover and Nicholas Cameron and Nicholas Roberts and Nick Doiron and Nikita Nangia and Niklas Deckers and Niklas Muennighoff and Nitish Shirish Keskar and Niveditha S. Iyer and Noah Constant and Noah Fiedel and Nuan Wen and Oliver Zhang and Omar Agha and Omar Elbaghdadi and Omer Levy and Owain Evans and Pablo Antonio Moreno Casares and Parth Doshi and Pascale Fung and Paul Pu Liang and Paul Vicol and Pegah Alipoormolabashi and Peiyuan Liao and Percy Liang and Peter Chang and Peter Eckersley and Phu Mon Htut and Pinyu Hwang and Piotr Miłkowski and Piyush Patil and Pouya Pezeshkpour and Priti Oli and Qiaozhu Mei and Qing Lyu and Qinlang Chen and Rabin Banjade and Rachel Etta Rudolph and Raefer Gabriel and Rahel Habacker and Ramón Risco Delgado and Raphaël Millière and Rhythm Garg and Richard Barnes and Rif A. Saurous and Riku Arakawa and Robbe Raymaekers and Robert Frank and Rohan Sikand and Roman Novak and Roman Sitelew and Ronan LeBras and Rosanne Liu and Rowan Jacobs and Rui Zhang and Ruslan Salakhutdinov and Ryan Chi and Ryan Lee and Ryan Stovall and Ryan Teehan and Rylan Yang and Sahib Singh and Saif M. Mohammad and Sajant Anand and Sam Dillavou and Sam Shleifer and Sam Wiseman and Samuel Gruetter and Samuel R. Bowman and Samuel S. Schoenholz and Sanghyun Han and Sanjeev Kwatra and Sarah A. Rous and Sarik Ghazarian and Sayan Ghosh and Sean Casey and Sebastian Bischoff and Sebastian Gehrmann and Sebastian Schuster and Sepideh Sadeghi and Shadi Hamdan and Sharon Zhou and Shashank Srivastava and Sherry Shi and Shikhar Singh and Shima Asaadi and Shixiang Shane Gu and Shubh Pachchigar and Shubham Toshniwal and Shyam Upadhyay and Shyamolima and Debnath and Siamak Shakeri and Simon Thormeyer and Simone Melzi and Siva Reddy and Sneha Priscilla Makini and Soo-Hwan Lee and Spencer Torene and Sriharsha Hatwar and Stanislas Dehaene and Stefan Divic and Stefano Ermon and Stella Biderman and Stephanie Lin and Stephen Prasad and Steven T. Piantadosi and Stuart M. 
Shieber and Summer Misherghi and Svetlana Kiritchenko and Swaroop Mishra and Tal Linzen and Tal Schuster and Tao Li and Tao Yu and Tariq Ali and Tatsu Hashimoto and Te-Lin Wu and Théo Desbordes and Theodore Rothschild and Thomas Phan and Tianle Wang and Tiberius Nkinyili and Timo Schick and Timofei Kornev and Timothy Telleen-Lawton and Titus Tunduny and Tobias Gerstenberg and Trenton Chang and Trishala Neeraj and Tushar Khot and Tyler Shultz and Uri Shaham and Vedant Misra and Vera Demberg and Victoria Nyamai and Vikas Raunak and Vinay Ramasesh and Vinay Uday Prabhu and Vishakh Padmakumar and Vivek Srikumar and William Fedus and William Saunders and William Zhang and Wout Vossen and Xiang Ren and Xiaoyu Tong and Xinran Zhao and Xinyi Wu and Xudong Shen and Yadollah Yaghoobzadeh and Yair Lakretz and Yangqiu Song and Yasaman Bahri and Yejin Choi and Yichi Yang and Yiding Hao and Yifu Chen and Yonatan Belinkov and Yu Hou and Yufang Hou and Yuntao Bai and Zachary Seid and Zhuoye Zhao and Zijian Wang and Zijie J. Wang and Zirui Wang and Ziyi Wu},
+    year={2022},
+    eprint={2206.04615},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `group_name`: `Short description`
+
+#### Tasks
+
+* `bigbench_{subtask}_multiple_choice`: multiple-choice variant of each of the 167 BIG-bench subtasks listed in `generate_tasks.py`, scored over the provided answer choices.
+* `bigbench_{subtask}_greedy_until`: generative variant of the same subtasks, scored on the model's greedy completion.
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
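[Editor's note] The per-task YAML files added below are emitted mechanically by `generate_tasks.py` (next diff). The following standalone sketch, which is not part of the patch, shows how a single generated config is produced; note that PyYAML's `yaml.dump` sorts keys alphabetically by default, which is why every generated file reads `dataset_name` / `include` / `task` in that order:

```python
# Illustrative sketch (not part of the patch): reproduce one generated
# task config to show the bigbench_<subtask>_<variant> naming scheme.
import yaml  # PyYAML

task = "abstract_narrative_understanding"  # any entry of all_subtasks
task_type = "greedy_until_template_yaml"   # or "multiple_choice_template_yaml"

config = {
    "include": f"../{task_type}",
    "task": f"bigbench_{task}_{task_type.split('_template_yaml')[0]}",
    "dataset_name": task,
}
# sort_keys=True is yaml.dump's default, so keys come out alphabetically.
print(yaml.dump(config, width=float("inf"), allow_unicode=True))
# dataset_name: abstract_narrative_understanding
# include: ../greedy_until_template_yaml
# task: bigbench_abstract_narrative_understanding_greedy_until
```

The printed text matches the body of `greedy_until/abstract_narrative_understanding.yaml` as committed below, minus the `# Generated by utils.py` header line the script prepends.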
diff --git a/lm_eval/tasks/bigbench/generate_tasks.py b/lm_eval/tasks/bigbench/generate_tasks.py new file mode 100644 index 00000000..fa68190e --- /dev/null +++ b/lm_eval/tasks/bigbench/generate_tasks.py @@ -0,0 +1,198 @@ +import os +import yaml + +all_subtasks = [ + 'abstract_narrative_understanding', + 'anachronisms', + 'analogical_similarity', + 'analytic_entailment', + 'arithmetic', + 'ascii_word_recognition', + 'authorship_verification', + 'auto_categorization', + 'auto_debugging', + 'bbq_lite_json', + 'bridging_anaphora_resolution_barqa', + 'causal_judgment', + 'cause_and_effect', + 'checkmate_in_one', + 'chess_state_tracking', + 'chinese_remainder_theorem', + 'cifar10_classification', + 'code_line_description', + 'codenames', + 'color', + 'common_morpheme', + 'conceptual_combinations', + 'conlang_translation', + 'contextual_parametric_knowledge_conflicts', + 'crash_blossom', + 'crass_ai', + 'cryobiology_spanish', + 'cryptonite', + 'cs_algorithms', + 'dark_humor_detection', + 'date_understanding', + 'disambiguation_qa', + 'discourse_marker_prediction', + 'disfl_qa', + 'dyck_languages', + 'elementary_math_qa', + 'emoji_movie', + 'emojis_emotion_prediction', + 'empirical_judgments', + 'english_proverbs', + 'english_russian_proverbs', + 'entailed_polarity', + 'entailed_polarity_hindi', + 'epistemic_reasoning', + 'evaluating_information_essentiality', + 'fact_checker', + 'fantasy_reasoning', + 'few_shot_nlg', + 'figure_of_speech_detection', + 'formal_fallacies_syllogisms_negation', + 'gem', + 'gender_inclusive_sentences_german', + 'general_knowledge', + 'geometric_shapes', + 'goal_step_wikihow', + 'gre_reading_comprehension', + 'hhh_alignment', + 'hindi_question_answering', + 'hindu_knowledge', + 'hinglish_toxicity', + 'human_organs_senses', + 'hyperbaton', + 'identify_math_theorems', + 'identify_odd_metaphor', + 'implicatures', + 'implicit_relations', + 'intent_recognition', + 'international_phonetic_alphabet_nli', + 'international_phonetic_alphabet_transliterate', + 'intersect_geometry', + 'irony_identification', + 'kanji_ascii', + 'kannada', + 'key_value_maps', + 'known_unknowns', + 'language_games', + 'language_identification', + 'linguistic_mappings', + 'linguistics_puzzles', + 'list_functions', + 'logic_grid_puzzle', + 'logical_args', + 'logical_deduction', + 'logical_fallacy_detection', + 'logical_sequence', + 'mathematical_induction', + 'matrixshapes', + 'metaphor_boolean', + 'metaphor_understanding', + 'minute_mysteries_qa', + 'misconceptions', + 'misconceptions_russian', + 'mnist_ascii', + 'modified_arithmetic', + 'moral_permissibility', + 'movie_dialog_same_or_different', + 'movie_recommendation', + 'mult_data_wrangling', + 'multiemo', + 'natural_instructions', + 'navigate', + 'nonsense_words_grammar', + 'novel_concepts', + 'object_counting', + 'odd_one_out', + 'operators', + 'paragraph_segmentation', + 'parsinlu_qa', + 'parsinlu_reading_comprehension', + 'penguins_in_a_table', + 'periodic_elements', + 'persian_idioms', + 'phrase_relatedness', + 'physical_intuition', + 'physics', + 'physics_questions', + 'play_dialog_same_or_different', + 'polish_sequence_labeling', + 'presuppositions_as_nli', + 'qa_wikidata', + 'question_selection', + 'real_or_fake_text', + 'reasoning_about_colored_objects', + 'repeat_copy_logic', + 'rephrase', + 'riddle_sense', + 'ruin_names', + 'salient_translation_error_detection', + 'scientific_press_release', + 'semantic_parsing_in_context_sparc', + 'semantic_parsing_spider', + 'sentence_ambiguity', + 'similarities_abstraction', + 'simp_turing_concept', 
+    'simple_arithmetic_json',
+    'simple_arithmetic_json_multiple_choice',
+    'simple_arithmetic_json_subtasks',
+    'simple_arithmetic_multiple_targets_json',
+    'simple_ethical_questions',
+    'simple_text_editing',
+    'snarks',
+    'social_iqa',
+    'social_support',
+    'sports_understanding',
+    'strange_stories',
+    'strategyqa',
+    'sufficient_information',
+    'suicide_risk',
+    'swahili_english_proverbs',
+    'swedish_to_german_proverbs',
+    'symbol_interpretation',
+    'temporal_sequences',
+    'tense',
+    'timedial',
+    'topical_chat',
+    'tracking_shuffled_objects',
+    'understanding_fables',
+    'undo_permutation',
+    'unit_conversion',
+    'unit_interpretation',
+    'unnatural_in_context_learning',
+    'vitaminc_fact_verification',
+    'what_is_the_tao',
+    'which_wiki_edit',
+    'winowhy',
+    'word_sorting',
+    'word_unscrambling'
+]
+
+
+def main() -> None:
+    # Emit one YAML config per subtask for each of the two task variants,
+    # each pointing back at the shared template via an `include` key.
+    for path, task_type in zip(
+        ["multiple_choice", "greedy_until"],
+        ["multiple_choice_template_yaml", "greedy_until_template_yaml"],
+    ):
+        os.makedirs(path, exist_ok=True)
+        for task in all_subtasks:
+            file_name = f"{task}.yaml"
+            # NOTE: mode "w" always truncates, so the FileExistsError branch
+            # below is never reached; open with mode "x" instead if existing
+            # files should be left untouched on regeneration.
+            try:
+                with open(f"{path}/{file_name}", "w") as f:
+                    f.write("# Generated by utils.py\n")
+                    yaml.dump(
+                        {
+                            "include": f"../{task_type}",
+                            "task": f"bigbench_{task}_{task_type.split('_template_yaml')[0]}",
+                            "dataset_name": task,
+                        },
+                        f,
+                        width=float("inf"),
+                        allow_unicode=True,
+                    )
+            except FileExistsError:
+                pass
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml
new file mode 100644
index 00000000..462d1be9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: abstract_narrative_understanding
+include: ../greedy_until_template_yaml
+task: bigbench_abstract_narrative_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml b/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml
new file mode 100644
index 00000000..d62133a0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms
+include: ../greedy_until_template_yaml
+task: bigbench_anachronisms_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml b/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml
new file mode 100644
index 00000000..2fedcd91
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analogical_similarity
+include: ../greedy_until_template_yaml
+task: bigbench_analogical_similarity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml b/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml
new file mode 100644
index 00000000..58de1bcf
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment
+include: ../greedy_until_template_yaml
+task: bigbench_analytic_entailment_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml b/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml
new file mode 100644
index 00000000..6be6a787
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic
+include: ../greedy_until_template_yaml
+task: 
bigbench_arithmetic_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml b/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml new file mode 100644 index 00000000..0461605a --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: ascii_word_recognition +include: ../greedy_until_template_yaml +task: bigbench_ascii_word_recognition_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml b/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml new file mode 100644 index 00000000..dbfa2103 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: authorship_verification +include: ../greedy_until_template_yaml +task: bigbench_authorship_verification_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml b/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml new file mode 100644 index 00000000..9ab1545e --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: auto_categorization +include: ../greedy_until_template_yaml +task: bigbench_auto_categorization_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml b/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml new file mode 100644 index 00000000..e8a491c0 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: auto_debugging +include: ../greedy_until_template_yaml +task: bigbench_auto_debugging_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml b/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml new file mode 100644 index 00000000..8b97ba0a --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: bbq_lite_json +include: ../greedy_until_template_yaml +task: bigbench_bbq_lite_json_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml b/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml new file mode 100644 index 00000000..618d8dd3 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: bridging_anaphora_resolution_barqa +include: ../greedy_until_template_yaml +task: bigbench_bridging_anaphora_resolution_barqa_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml b/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml new file mode 100644 index 00000000..687d59ba --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: causal_judgment +include: ../greedy_until_template_yaml +task: bigbench_causal_judgment_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml b/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml new file mode 100644 index 00000000..a1f20264 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cause_and_effect +include: ../greedy_until_template_yaml +task: bigbench_cause_and_effect_greedy_until diff --git 
a/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml b/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml new file mode 100644 index 00000000..4089a228 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: checkmate_in_one +include: ../greedy_until_template_yaml +task: bigbench_checkmate_in_one_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml b/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml new file mode 100644 index 00000000..727e7879 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: chess_state_tracking +include: ../greedy_until_template_yaml +task: bigbench_chess_state_tracking_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml b/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml new file mode 100644 index 00000000..6af0bcbf --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: chinese_remainder_theorem +include: ../greedy_until_template_yaml +task: bigbench_chinese_remainder_theorem_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml b/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml new file mode 100644 index 00000000..3e0bf92c --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cifar10_classification +include: ../greedy_until_template_yaml +task: bigbench_cifar10_classification_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml b/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml new file mode 100644 index 00000000..624ab362 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: code_line_description +include: ../greedy_until_template_yaml +task: bigbench_code_line_description_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/codenames.yaml b/lm_eval/tasks/bigbench/greedy_until/codenames.yaml new file mode 100644 index 00000000..6ea8f12e --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/codenames.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: codenames +include: ../greedy_until_template_yaml +task: bigbench_codenames_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/color.yaml b/lm_eval/tasks/bigbench/greedy_until/color.yaml new file mode 100644 index 00000000..4ae393fd --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/color.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: color +include: ../greedy_until_template_yaml +task: bigbench_color_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml b/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml new file mode 100644 index 00000000..90d183ad --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: common_morpheme +include: ../greedy_until_template_yaml +task: bigbench_common_morpheme_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml b/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml new file mode 100644 index 00000000..007649a4 --- /dev/null +++ 
b/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: conceptual_combinations +include: ../greedy_until_template_yaml +task: bigbench_conceptual_combinations_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml b/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml new file mode 100644 index 00000000..3b5bafac --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: conlang_translation +include: ../greedy_until_template_yaml +task: bigbench_conlang_translation_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml b/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml new file mode 100644 index 00000000..dc594b9b --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: contextual_parametric_knowledge_conflicts +include: ../greedy_until_template_yaml +task: bigbench_contextual_parametric_knowledge_conflicts_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml b/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml new file mode 100644 index 00000000..aca19b7b --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: crash_blossom +include: ../greedy_until_template_yaml +task: bigbench_crash_blossom_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml b/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml new file mode 100644 index 00000000..043e8f47 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: crass_ai +include: ../greedy_until_template_yaml +task: bigbench_crass_ai_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml b/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml new file mode 100644 index 00000000..eb9c5b3b --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryobiology_spanish +include: ../greedy_until_template_yaml +task: bigbench_cryobiology_spanish_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml b/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml new file mode 100644 index 00000000..15c181b2 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryptonite +include: ../greedy_until_template_yaml +task: bigbench_cryptonite_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml b/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml new file mode 100644 index 00000000..477c2497 --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cs_algorithms +include: ../greedy_until_template_yaml +task: bigbench_cs_algorithms_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml new file mode 100644 index 00000000..0521848d --- /dev/null +++ b/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: dark_humor_detection +include: 
../greedy_until_template_yaml
+task: bigbench_dark_humor_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml
new file mode 100644
index 00000000..5936e98f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: date_understanding
+include: ../greedy_until_template_yaml
+task: bigbench_date_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml
new file mode 100644
index 00000000..ffe5135a
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa
+include: ../greedy_until_template_yaml
+task: bigbench_disambiguation_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml b/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml
new file mode 100644
index 00000000..aed41150
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: discourse_marker_prediction
+include: ../greedy_until_template_yaml
+task: bigbench_discourse_marker_prediction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml
new file mode 100644
index 00000000..fd087719
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disfl_qa
+include: ../greedy_until_template_yaml
+task: bigbench_disfl_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml b/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml
new file mode 100644
index 00000000..af29b7f8
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dyck_languages
+include: ../greedy_until_template_yaml
+task: bigbench_dyck_languages_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml
new file mode 100644
index 00000000..ea1a61ba
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: elementary_math_qa
+include: ../greedy_until_template_yaml
+task: bigbench_elementary_math_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml b/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml
new file mode 100644
index 00000000..a8368f62
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emoji_movie
+include: ../greedy_until_template_yaml
+task: bigbench_emoji_movie_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml b/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml
new file mode 100644
index 00000000..f8392d66
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction
+include: ../greedy_until_template_yaml
+task: bigbench_emojis_emotion_prediction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml b/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml
new file mode 100644
index 00000000..97ea08c8
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: empirical_judgments
+include: ../greedy_until_template_yaml
+task: bigbench_empirical_judgments_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml
new file mode 100644
index 00000000..2eaa4a9b
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_proverbs
+include: ../greedy_until_template_yaml
+task: bigbench_english_proverbs_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml
new file mode 100644
index 00000000..d0386e50
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_russian_proverbs
+include: ../greedy_until_template_yaml
+task: bigbench_english_russian_proverbs_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml
new file mode 100644
index 00000000..efb1f853
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity
+include: ../greedy_until_template_yaml
+task: bigbench_entailed_polarity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml
new file mode 100644
index 00000000..5922a065
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_hindi
+include: ../greedy_until_template_yaml
+task: bigbench_entailed_polarity_hindi_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml b/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml
new file mode 100644
index 00000000..d6307592
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: epistemic_reasoning
+include: ../greedy_until_template_yaml
+task: bigbench_epistemic_reasoning_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml b/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml
new file mode 100644
index 00000000..13b6dd32
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality
+include: ../greedy_until_template_yaml
+task: bigbench_evaluating_information_essentiality_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml b/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml
new file mode 100644
index 00000000..6d3ccf9f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fact_checker
+include: ../greedy_until_template_yaml
+task: bigbench_fact_checker_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml b/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml
new file mode 100644
index 00000000..16415a7b
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fantasy_reasoning
+include: ../greedy_until_template_yaml
+task: bigbench_fantasy_reasoning_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml b/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml
new file mode 100644
index 00000000..229e1c70
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: few_shot_nlg
+include: ../greedy_until_template_yaml
+task: bigbench_few_shot_nlg_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml
new file mode 100644
index 00000000..059f9f33
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: figure_of_speech_detection
+include: ../greedy_until_template_yaml
+task: bigbench_figure_of_speech_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml b/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml
new file mode 100644
index 00000000..663a718c
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: formal_fallacies_syllogisms_negation
+include: ../greedy_until_template_yaml
+task: bigbench_formal_fallacies_syllogisms_negation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/gem.yaml b/lm_eval/tasks/bigbench/greedy_until/gem.yaml
new file mode 100644
index 00000000..79492583
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/gem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gem
+include: ../greedy_until_template_yaml
+task: bigbench_gem_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml b/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml
new file mode 100644
index 00000000..10414179
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gender_inclusive_sentences_german
+include: ../greedy_until_template_yaml
+task: bigbench_gender_inclusive_sentences_german_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml b/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml
new file mode 100644
index 00000000..b2a14656
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: general_knowledge
+include: ../greedy_until_template_yaml
+task: bigbench_general_knowledge_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml b/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml
new file mode 100644
index 00000000..4e256462
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: geometric_shapes
+include: ../greedy_until_template_yaml
+task: bigbench_geometric_shapes_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml b/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml
new file mode 100644
index 00000000..d865e3d4
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: goal_step_wikihow
+include: ../greedy_until_template_yaml
+task: bigbench_goal_step_wikihow_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml b/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml
new file mode 100644
index 00000000..9f044835
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gre_reading_comprehension
+include: ../greedy_until_template_yaml
+task: bigbench_gre_reading_comprehension_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml b/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml
new file mode 100644
index 00000000..1ab62b56
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hhh_alignment
+include: ../greedy_until_template_yaml
+task: bigbench_hhh_alignment_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml b/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml
new file mode 100644
index 00000000..3a0fa8b2
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindi_question_answering
+include: ../greedy_until_template_yaml
+task: bigbench_hindi_question_answering_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml b/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml
new file mode 100644
index 00000000..19162629
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindu_knowledge
+include: ../greedy_until_template_yaml
+task: bigbench_hindu_knowledge_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml b/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml
new file mode 100644
index 00000000..84073aa0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hinglish_toxicity
+include: ../greedy_until_template_yaml
+task: bigbench_hinglish_toxicity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml b/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml
new file mode 100644
index 00000000..32fc0058
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: human_organs_senses
+include: ../greedy_until_template_yaml
+task: bigbench_human_organs_senses_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml b/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml
new file mode 100644
index 00000000..d3a65a87
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton
+include: ../greedy_until_template_yaml
+task: bigbench_hyperbaton_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml b/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml
new file mode 100644
index 00000000..616085c8
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_math_theorems
+include: ../greedy_until_template_yaml
+task: bigbench_identify_math_theorems_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml b/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml
new file mode 100644
index 00000000..6500f7a9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_odd_metaphor
+include: ../greedy_until_template_yaml
+task: bigbench_identify_odd_metaphor_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml b/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml
new file mode 100644
index 00000000..fdc133f5
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures
+include: ../greedy_until_template_yaml
+task: bigbench_implicatures_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml b/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml
new file mode 100644
index 00000000..b05af0ad
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicit_relations
+include: ../greedy_until_template_yaml
+task: bigbench_implicit_relations_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml b/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml
new file mode 100644
index 00000000..37769770
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intent_recognition
+include: ../greedy_until_template_yaml
+task: bigbench_intent_recognition_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml
new file mode 100644
index 00000000..81b975c9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_nli
+include: ../greedy_until_template_yaml
+task: bigbench_international_phonetic_alphabet_nli_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml
new file mode 100644
index 00000000..ac664332
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_transliterate
+include: ../greedy_until_template_yaml
+task: bigbench_international_phonetic_alphabet_transliterate_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml b/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml
new file mode 100644
index 00000000..d08f1d6a
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intersect_geometry
+include: ../greedy_until_template_yaml
+task: bigbench_intersect_geometry_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml b/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml
new file mode 100644
index 00000000..d9d5961c
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irony_identification
+include: ../greedy_until_template_yaml
+task: bigbench_irony_identification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml b/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml
new file mode 100644
index 00000000..b6a7470f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: kanji_ascii
+include: ../greedy_until_template_yaml
+task: bigbench_kanji_ascii_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/kannada.yaml b/lm_eval/tasks/bigbench/greedy_until/kannada.yaml
new file mode 100644
index 00000000..50ad13c1
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/kannada.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: kannada
+include: ../greedy_until_template_yaml
+task: bigbench_kannada_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml b/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml
new file mode 100644
index 00000000..6d5ad040
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps
+include: ../greedy_until_template_yaml
+task: bigbench_key_value_maps_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml b/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml
new file mode 100644
index 00000000..c07e0e8c
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: known_unknowns
+include: ../greedy_until_template_yaml
+task: bigbench_known_unknowns_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/language_games.yaml b/lm_eval/tasks/bigbench/greedy_until/language_games.yaml
new file mode 100644
index 00000000..392a7190
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/language_games.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_games
+include: ../greedy_until_template_yaml
+task: bigbench_language_games_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml b/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml
new file mode 100644
index 00000000..583d9108
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification
+include: ../greedy_until_template_yaml
+task: bigbench_language_identification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml b/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml
new file mode 100644
index 00000000..92a855a8
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistic_mappings
+include: ../greedy_until_template_yaml
+task: bigbench_linguistic_mappings_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml b/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml
new file mode 100644
index 00000000..7aec6607
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistics_puzzles
+include: ../greedy_until_template_yaml
+task: bigbench_linguistics_puzzles_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml b/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml
new file mode 100644
index 00000000..f7f0d436
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: list_functions
+include: ../greedy_until_template_yaml
+task: bigbench_list_functions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml b/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml
new file mode 100644
index 00000000..2699b12f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logic_grid_puzzle
+include: ../greedy_until_template_yaml
+task: bigbench_logic_grid_puzzle_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml
new file mode 100644
index 00000000..9a263f96
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_args
+include: ../greedy_until_template_yaml
+task: bigbench_logical_args_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml
new file mode 100644
index 00000000..5e72facb
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_deduction
+include: ../greedy_until_template_yaml
+task: bigbench_logical_deduction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml
new file mode 100644
index 00000000..a21fbc58
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_fallacy_detection
+include: ../greedy_until_template_yaml
+task: bigbench_logical_fallacy_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml
new file mode 100644
index 00000000..f01ce277
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_sequence
+include: ../greedy_until_template_yaml
+task: bigbench_logical_sequence_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml b/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml
new file mode 100644
index 00000000..d4b2fcf6
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mathematical_induction
+include: ../greedy_until_template_yaml
+task: bigbench_mathematical_induction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml b/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml
new file mode 100644
index 00000000..adf86ecc
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: matrixshapes
+include: ../greedy_until_template_yaml
+task: bigbench_matrixshapes_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml b/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml
new file mode 100644
index 00000000..94893b4d
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_boolean
+include: ../greedy_until_template_yaml
+task: bigbench_metaphor_boolean_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml
new file mode 100644
index 00000000..8ca4da75
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_understanding
+include: ../greedy_until_template_yaml
+task: bigbench_metaphor_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml
new file mode 100644
index 00000000..b9db2b8f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: minute_mysteries_qa
+include: ../greedy_until_template_yaml
+task: bigbench_minute_mysteries_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml b/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml
new file mode 100644
index 00000000..60c8221b
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions
+include: ../greedy_until_template_yaml
+task: bigbench_misconceptions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml b/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml
new file mode 100644
index 00000000..a1fca685
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian
+include: ../greedy_until_template_yaml
+task: bigbench_misconceptions_russian_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml b/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml
new file mode 100644
index 00000000..b845caa3
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mnist_ascii
+include: ../greedy_until_template_yaml
+task: bigbench_mnist_ascii_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml b/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml
new file mode 100644
index 00000000..5dc888f6
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: modified_arithmetic
+include: ../greedy_until_template_yaml
+task: bigbench_modified_arithmetic_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml b/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml
new file mode 100644
index 00000000..a20c23be
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: moral_permissibility
+include: ../greedy_until_template_yaml
+task: bigbench_moral_permissibility_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml
new file mode 100644
index 00000000..db57a939
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_dialog_same_or_different
+include: ../greedy_until_template_yaml
+task: bigbench_movie_dialog_same_or_different_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml b/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml
new file mode 100644
index 00000000..00a0c1a4
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_recommendation
+include: ../greedy_until_template_yaml
+task: bigbench_movie_recommendation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml b/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml
new file mode 100644
index 00000000..7a1003cf
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mult_data_wrangling
+include: ../greedy_until_template_yaml
+task: bigbench_mult_data_wrangling_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml b/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml
new file mode 100644
index 00000000..df230d77
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: multiemo
+include: ../greedy_until_template_yaml
+task: bigbench_multiemo_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml b/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml
new file mode 100644
index 00000000..cc800106
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: natural_instructions
+include: ../greedy_until_template_yaml
+task: bigbench_natural_instructions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/navigate.yaml b/lm_eval/tasks/bigbench/greedy_until/navigate.yaml
new file mode 100644
index 00000000..1e3004d5
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/navigate.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: navigate
+include: ../greedy_until_template_yaml
+task: bigbench_navigate_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml b/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml
new file mode 100644
index 00000000..169b6743
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: nonsense_words_grammar
+include: ../greedy_until_template_yaml
+task: bigbench_nonsense_words_grammar_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml b/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml
new file mode 100644
index 00000000..9618dce2
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: novel_concepts
+include: ../greedy_until_template_yaml
+task: bigbench_novel_concepts_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml b/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml
new file mode 100644
index 00000000..7b058748
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: object_counting
+include: ../greedy_until_template_yaml
+task: bigbench_object_counting_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml b/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml
new file mode 100644
index 00000000..1742789e
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: odd_one_out
+include: ../greedy_until_template_yaml
+task: bigbench_odd_one_out_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/operators.yaml b/lm_eval/tasks/bigbench/greedy_until/operators.yaml
new file mode 100644
index 00000000..d71d87c2
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/operators.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: operators
+include: ../greedy_until_template_yaml
+task: bigbench_operators_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml b/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml
new file mode 100644
index 00000000..13d8fb9d
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: paragraph_segmentation
+include: ../greedy_until_template_yaml
+task: bigbench_paragraph_segmentation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml
new file mode 100644
index 00000000..f8b78f8d
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_qa
+include: ../greedy_until_template_yaml
+task: bigbench_parsinlu_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml b/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml
new file mode 100644
index 00000000..4db292d0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_reading_comprehension
+include: ../greedy_until_template_yaml
+task: bigbench_parsinlu_reading_comprehension_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml b/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml
new file mode 100644
index 00000000..a282fa64
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: penguins_in_a_table
+include: ../greedy_until_template_yaml
+task: bigbench_penguins_in_a_table_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml b/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml
new file mode 100644
index 00000000..458a2e3d
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: periodic_elements
+include: ../greedy_until_template_yaml
+task: bigbench_periodic_elements_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml b/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml
new file mode 100644
index 00000000..e51eb69a
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: persian_idioms
+include: ../greedy_until_template_yaml
+task: bigbench_persian_idioms_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml b/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml
new file mode 100644
index 00000000..3b03a67f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: phrase_relatedness
+include: ../greedy_until_template_yaml
+task: bigbench_phrase_relatedness_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml b/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml
new file mode 100644
index 00000000..358b7db3
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physical_intuition
+include: ../greedy_until_template_yaml
+task: bigbench_physical_intuition_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/physics.yaml b/lm_eval/tasks/bigbench/greedy_until/physics.yaml
new file mode 100644
index 00000000..d9d6f936
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/physics.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics
+include: ../greedy_until_template_yaml
+task: bigbench_physics_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml b/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml
new file mode 100644
index 00000000..6af11448
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_questions
+include: ../greedy_until_template_yaml
+task: bigbench_physics_questions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml
new file mode 100644
index 00000000..600143e9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: play_dialog_same_or_different
+include: ../greedy_until_template_yaml
+task: bigbench_play_dialog_same_or_different_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml b/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml
new file mode 100644
index 00000000..432820ad
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: polish_sequence_labeling
+include: ../greedy_until_template_yaml
+task: bigbench_polish_sequence_labeling_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml b/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml
new file mode 100644
index 00000000..c492b17f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: presuppositions_as_nli
+include: ../greedy_until_template_yaml
+task: bigbench_presuppositions_as_nli_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml b/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml
new file mode 100644
index 00000000..a23ea6e7
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: qa_wikidata
+include: ../greedy_until_template_yaml
+task: bigbench_qa_wikidata_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml b/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml
new file mode 100644
index 00000000..47953c14
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: question_selection
+include: ../greedy_until_template_yaml
+task: bigbench_question_selection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml b/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml
new file mode 100644
index 00000000..e15af76e
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: real_or_fake_text
+include: ../greedy_until_template_yaml
+task: bigbench_real_or_fake_text_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml
new file mode 100644
index 00000000..b1aa5ec0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: reasoning_about_colored_objects
+include: ../greedy_until_template_yaml
+task: bigbench_reasoning_about_colored_objects_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml b/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml
new file mode 100644
index 00000000..12831cc7
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: repeat_copy_logic
+include: ../greedy_until_template_yaml
+task: bigbench_repeat_copy_logic_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml b/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml
new file mode 100644
index 00000000..78c6bda7
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: rephrase
+include: ../greedy_until_template_yaml
+task: bigbench_rephrase_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml b/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml
new file mode 100644
index 00000000..e93b4aa9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: riddle_sense
+include: ../greedy_until_template_yaml
+task: bigbench_riddle_sense_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml b/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml
new file mode 100644
index 00000000..46039e1f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ruin_names
+include: ../greedy_until_template_yaml
+task: bigbench_ruin_names_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml
new file mode 100644
index 00000000..a7e5c77e
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: salient_translation_error_detection
+include: ../greedy_until_template_yaml
+task: bigbench_salient_translation_error_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml b/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml
new file mode 100644
index 00000000..aa35e659
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: scientific_press_release
+include: ../greedy_until_template_yaml
+task: bigbench_scientific_press_release_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml
new file mode 100644
index 00000000..184bfcb9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: semantic_parsing_in_context_sparc
+include: ../greedy_until_template_yaml
+task: bigbench_semantic_parsing_in_context_sparc_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml
new file mode 100644
index 00000000..ae0b9461
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: semantic_parsing_spider
+include: ../greedy_until_template_yaml
+task: bigbench_semantic_parsing_spider_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml b/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml
new file mode 100644
index 00000000..bb72ec88
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentence_ambiguity
+include: ../greedy_until_template_yaml
+task: bigbench_sentence_ambiguity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml b/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml
new file mode 100644
index 00000000..5c1ef27f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: similarities_abstraction
+include: ../greedy_until_template_yaml
+task: bigbench_similarities_abstraction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml b/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml
new file mode 100644
index 00000000..742df0fb
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simp_turing_concept
+include: ../greedy_until_template_yaml
+task: bigbench_simp_turing_concept_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml
new file mode 100644
index 00000000..4e70a160
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json
+include: ../greedy_until_template_yaml
+task: bigbench_simple_arithmetic_json_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml
new file mode 100644
index 00000000..5f6b6732
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_multiple_choice
+include: ../greedy_until_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml
new file mode 100644
index 00000000..32c5fcfd
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_subtasks
+include: ../greedy_until_template_yaml
+task: bigbench_simple_arithmetic_json_subtasks_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml
new file mode 100644
index 00000000..0d87803e
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_multiple_targets_json
+include: ../greedy_until_template_yaml
+task: bigbench_simple_arithmetic_multiple_targets_json_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml
new file mode 100644
index 00000000..2332985c
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_ethical_questions
+include: ../greedy_until_template_yaml
+task: bigbench_simple_ethical_questions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml
new file mode 100644
index 00000000..1d9943e4
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_text_editing
+include: ../greedy_until_template_yaml
+task: bigbench_simple_text_editing_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/snarks.yaml b/lm_eval/tasks/bigbench/greedy_until/snarks.yaml
new file mode 100644
index 00000000..e98308e1
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/snarks.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: snarks
+include: ../greedy_until_template_yaml
+task: bigbench_snarks_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml b/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml
new file mode 100644
index 00000000..7af09b30
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_iqa
+include: ../greedy_until_template_yaml
+task: bigbench_social_iqa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/social_support.yaml b/lm_eval/tasks/bigbench/greedy_until/social_support.yaml
new file mode 100644
index 00000000..8e34e758
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/social_support.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_support
+include: ../greedy_until_template_yaml
+task: bigbench_social_support_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml
new file mode 100644
index 00000000..3ae80c24
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sports_understanding
+include: ../greedy_until_template_yaml
+task: bigbench_sports_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml b/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml
new file mode 100644
index 00000000..b6020b08
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strange_stories
+include: ../greedy_until_template_yaml
+task: bigbench_strange_stories_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml b/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml
new file mode 100644
index 00000000..066c89d1
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strategyqa
+include: ../greedy_until_template_yaml
+task: bigbench_strategyqa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml b/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml
new file mode 100644
index 00000000..27ef04dd
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sufficient_information
+include: ../greedy_until_template_yaml
+task: bigbench_sufficient_information_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml b/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml
new file mode 100644
index 00000000..a7496025
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: suicide_risk
+include: ../greedy_until_template_yaml
+task: bigbench_suicide_risk_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml
new file mode 100644
index 00000000..25e7dfe6
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swahili_english_proverbs
+include: ../greedy_until_template_yaml
+task: bigbench_swahili_english_proverbs_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml
new file mode 100644
index 00000000..1beebb17
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swedish_to_german_proverbs
+include: ../greedy_until_template_yaml
+task: bigbench_swedish_to_german_proverbs_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml b/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml
new file mode 100644
index 00000000..27b29a05
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: symbol_interpretation
+include: ../greedy_until_template_yaml
+task: bigbench_symbol_interpretation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml b/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml
new file mode 100644
index 00000000..6ed42414
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: temporal_sequences
+include: ../greedy_until_template_yaml
+task: bigbench_temporal_sequences_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/tense.yaml b/lm_eval/tasks/bigbench/greedy_until/tense.yaml
new file mode 100644
index 00000000..49adc7c2
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/tense.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: tense
+include: ../greedy_until_template_yaml
+task: bigbench_tense_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/timedial.yaml b/lm_eval/tasks/bigbench/greedy_until/timedial.yaml
new file mode 100644
index 00000000..391dff43
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/timedial.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: timedial
+include: ../greedy_until_template_yaml
+task: bigbench_timedial_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml b/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml
new file mode 100644
index 00000000..f9f1893f
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: topical_chat
+include: ../greedy_until_template_yaml
+task: bigbench_topical_chat_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml b/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml
new file mode 100644
index 00000000..675b0e37
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: tracking_shuffled_objects
+include: ../greedy_until_template_yaml
+task: bigbench_tracking_shuffled_objects_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml b/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml
new file mode 100644
index 00000000..3c5ff40a
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: understanding_fables
+include: ../greedy_until_template_yaml
+task: bigbench_understanding_fables_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml b/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml
new file mode 100644
index 00000000..8e0c0699
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: undo_permutation
+include: ../greedy_until_template_yaml
+task: bigbench_undo_permutation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml b/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml
new file mode 100644
index 00000000..384ccc05
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_conversion
+include: ../greedy_until_template_yaml
+task: bigbench_unit_conversion_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml b/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml
new file mode 100644
index 00000000..a33bfd51
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_interpretation
+include: ../greedy_until_template_yaml
+task: bigbench_unit_interpretation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml b/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml
new file mode 100644
index 00000000..cb3d2572
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unnatural_in_context_learning
+include: ../greedy_until_template_yaml
+task: bigbench_unnatural_in_context_learning_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml b/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml
new file mode 100644
index 00000000..67380ab9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: vitaminc_fact_verification
+include: ../greedy_until_template_yaml
+task: bigbench_vitaminc_fact_verification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml b/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml
new file mode 100644
index 00000000..baad0d9e
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: what_is_the_tao
+include: ../greedy_until_template_yaml
+task: bigbench_what_is_the_tao_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml b/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml
new file mode 100644
index 00000000..70047ee7
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: which_wiki_edit
+include: ../greedy_until_template_yaml
+task: bigbench_which_wiki_edit_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml b/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml
new file mode 100644
index 00000000..fff312b3
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: winowhy
+include: ../greedy_until_template_yaml
+task: bigbench_winowhy_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml b/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml
new file mode 100644
index 00000000..77b55d77
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_sorting
+include: ../greedy_until_template_yaml
+task: bigbench_word_sorting_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml b/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml
new file mode 100644
index 00000000..75fe7de9
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_unscrambling
+include: ../greedy_until_template_yaml
+task: bigbench_word_unscrambling_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until_template_yaml b/lm_eval/tasks/bigbench/greedy_until_template_yaml
new file mode 100644
index 00000000..1d4e492b
--- /dev/null
+++ b/lm_eval/tasks/bigbench/greedy_until_template_yaml
@@ -0,0 +1,14 @@
+group: bigbench
+dataset_path: bigbench
+output_type: greedy_until
+training_split: train
+validation_split: validation
+doc_to_text: inputs
+doc_to_target: "{{targets[0]}}"
+generation_kwargs:
+  max_length: 128
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_punctuation: true
diff --git a/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml
new file mode 100644
index 00000000..e815ad82
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: abstract_narrative_understanding
+include: ../multiple_choice_template_yaml
+task: bigbench_abstract_narrative_understanding_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml b/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
new file mode 100644
index 00000000..0edb33ae
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms
+include: ../multiple_choice_template_yaml
+task: bigbench_anachronisms_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml b/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml
new file mode 100644
index 00000000..4a63e23a
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analogical_similarity
+include: ../multiple_choice_template_yaml
+task: bigbench_analogical_similarity_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml b/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
new file mode 100644
index 00000000..3503337d
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment
+include: ../multiple_choice_template_yaml
+task: bigbench_analytic_entailment_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml b/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
new file mode 100644
index 00000000..a7af2d17
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic
+include: ../multiple_choice_template_yaml
+task: bigbench_arithmetic_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml b/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml
new file mode 100644
index 00000000..9eca1362
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ascii_word_recognition
+include: ../multiple_choice_template_yaml
+task: bigbench_ascii_word_recognition_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml b/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml
new file mode 100644
index 00000000..0c49e8ee
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: authorship_verification
+include: ../multiple_choice_template_yaml
+task: bigbench_authorship_verification_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml b/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml
new file mode 100644
index 00000000..108cc802
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_categorization
+include: ../multiple_choice_template_yaml
+task: bigbench_auto_categorization_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml b/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
new file mode 100644
index 00000000..7ae0c2a5
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging
+include: ../multiple_choice_template_yaml
+task: bigbench_auto_debugging_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml b/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml
new file mode 100644
index 00000000..6cb2bff4
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bbq_lite_json
+include: ../multiple_choice_template_yaml
+task: bigbench_bbq_lite_json_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml b/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml
new file mode 100644
index 00000000..33871759
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bridging_anaphora_resolution_barqa
+include: ../multiple_choice_template_yaml
+task: bigbench_bridging_anaphora_resolution_barqa_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml b/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml
new file mode 100644
index 00000000..340e9bda
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: causal_judgment
+include: ../multiple_choice_template_yaml
+task: bigbench_causal_judgment_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml b/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml
new file mode 100644
index 00000000..4b3dd1a6
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cause_and_effect
+include: ../multiple_choice_template_yaml
+task: bigbench_cause_and_effect_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml b/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml
new file mode 100644
index 00000000..000360c0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: checkmate_in_one
+include: ../multiple_choice_template_yaml
+task: bigbench_checkmate_in_one_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml b/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml
new file mode 100644
index 00000000..b6d1f2e2
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chess_state_tracking
+include: ../multiple_choice_template_yaml
+task: bigbench_chess_state_tracking_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml b/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml
new file mode 100644
index 00000000..2552166c
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chinese_remainder_theorem
+include: ../multiple_choice_template_yaml
+task: bigbench_chinese_remainder_theorem_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml b/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml
new file mode 100644
index 00000000..a03e56c0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cifar10_classification
+include: ../multiple_choice_template_yaml
+task: bigbench_cifar10_classification_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml b/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml
new file mode 100644
index 00000000..f2a33424
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: code_line_description
+include: ../multiple_choice_template_yaml
+task: bigbench_code_line_description_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml b/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml
new file mode 100644
index 00000000..c03dc365
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: codenames
+include: ../multiple_choice_template_yaml
+task: bigbench_codenames_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/color.yaml b/lm_eval/tasks/bigbench/multiple_choice/color.yaml
new file mode 100644
index 00000000..f49710c7
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/color.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: color
+include: ../multiple_choice_template_yaml
+task: bigbench_color_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml b/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml
new file mode 100644
index 00000000..619c8eea
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: common_morpheme
+include: ../multiple_choice_template_yaml
+task: bigbench_common_morpheme_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml b/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml
new file mode 100644
index 00000000..a7570bb0
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conceptual_combinations
+include: ../multiple_choice_template_yaml
+task: bigbench_conceptual_combinations_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml b/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml
new file mode 100644
index 00000000..4ff6ef02
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conlang_translation
+include: ../multiple_choice_template_yaml
+task: bigbench_conlang_translation_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml b/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml
new file mode 100644
index 00000000..4b34eec8
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: contextual_parametric_knowledge_conflicts
+include: ../multiple_choice_template_yaml
+task: bigbench_contextual_parametric_knowledge_conflicts_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml b/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml
new file mode 100644
index 00000000..2b0b9d46
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crash_blossom
+include: ../multiple_choice_template_yaml
+task: bigbench_crash_blossom_multiple_choice
diff --git a/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml b/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml
new file mode 100644
index 00000000..c203459a
--- /dev/null
+++ b/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crass_ai
+include: ../multiple_choice_template_yaml
+task: bigbench_crass_ai_multiple_choice
diff --git
a/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml b/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml new file mode 100644 index 00000000..c8cdd625 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryobiology_spanish +include: ../multiple_choice_template_yaml +task: bigbench_cryobiology_spanish_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml b/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml new file mode 100644 index 00000000..503cd601 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryptonite +include: ../multiple_choice_template_yaml +task: bigbench_cryptonite_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml b/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml new file mode 100644 index 00000000..bb9d90b0 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cs_algorithms +include: ../multiple_choice_template_yaml +task: bigbench_cs_algorithms_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml new file mode 100644 index 00000000..cb00651a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: dark_humor_detection +include: ../multiple_choice_template_yaml +task: bigbench_dark_humor_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml new file mode 100644 index 00000000..596a941e --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: date_understanding +include: ../multiple_choice_template_yaml +task: bigbench_date_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml new file mode 100644 index 00000000..5264c21f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: disambiguation_qa +include: ../multiple_choice_template_yaml +task: bigbench_disambiguation_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml b/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml new file mode 100644 index 00000000..151616c2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: discourse_marker_prediction +include: ../multiple_choice_template_yaml +task: bigbench_discourse_marker_prediction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml new file mode 100644 index 00000000..578df2a3 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: disfl_qa +include: ../multiple_choice_template_yaml +task: bigbench_disfl_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml 
b/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml new file mode 100644 index 00000000..07ecf4d4 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: dyck_languages +include: ../multiple_choice_template_yaml +task: bigbench_dyck_languages_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml new file mode 100644 index 00000000..d9e41204 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: elementary_math_qa +include: ../multiple_choice_template_yaml +task: bigbench_elementary_math_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml b/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml new file mode 100644 index 00000000..f6528de7 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: emoji_movie +include: ../multiple_choice_template_yaml +task: bigbench_emoji_movie_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml b/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml new file mode 100644 index 00000000..cedbd41c --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: emojis_emotion_prediction +include: ../multiple_choice_template_yaml +task: bigbench_emojis_emotion_prediction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml b/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml new file mode 100644 index 00000000..078a3c45 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: empirical_judgments +include: ../multiple_choice_template_yaml +task: bigbench_empirical_judgments_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml new file mode 100644 index 00000000..0dd3a6c6 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: english_proverbs +include: ../multiple_choice_template_yaml +task: bigbench_english_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml new file mode 100644 index 00000000..12c7dae6 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: english_russian_proverbs +include: ../multiple_choice_template_yaml +task: bigbench_english_russian_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml new file mode 100644 index 00000000..336a013e --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: entailed_polarity +include: ../multiple_choice_template_yaml +task: bigbench_entailed_polarity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml 
b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml new file mode 100644 index 00000000..4d1bf0e8 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: entailed_polarity_hindi +include: ../multiple_choice_template_yaml +task: bigbench_entailed_polarity_hindi_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml b/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml new file mode 100644 index 00000000..79827577 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: epistemic_reasoning +include: ../multiple_choice_template_yaml +task: bigbench_epistemic_reasoning_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml b/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml new file mode 100644 index 00000000..f82cd899 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: evaluating_information_essentiality +include: ../multiple_choice_template_yaml +task: bigbench_evaluating_information_essentiality_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml b/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml new file mode 100644 index 00000000..2e20aabe --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: fact_checker +include: ../multiple_choice_template_yaml +task: bigbench_fact_checker_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml b/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml new file mode 100644 index 00000000..e7931f2f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: fantasy_reasoning +include: ../multiple_choice_template_yaml +task: bigbench_fantasy_reasoning_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml b/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml new file mode 100644 index 00000000..593c4860 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: few_shot_nlg +include: ../multiple_choice_template_yaml +task: bigbench_few_shot_nlg_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml new file mode 100644 index 00000000..00f07670 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: figure_of_speech_detection +include: ../multiple_choice_template_yaml +task: bigbench_figure_of_speech_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml b/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml new file mode 100644 index 00000000..b2eb5aca --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: formal_fallacies_syllogisms_negation +include: ../multiple_choice_template_yaml +task: 
bigbench_formal_fallacies_syllogisms_negation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/gem.yaml b/lm_eval/tasks/bigbench/multiple_choice/gem.yaml new file mode 100644 index 00000000..5fd4caae --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/gem.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: gem +include: ../multiple_choice_template_yaml +task: bigbench_gem_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml b/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml new file mode 100644 index 00000000..77d16864 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: gender_inclusive_sentences_german +include: ../multiple_choice_template_yaml +task: bigbench_gender_inclusive_sentences_german_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml b/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml new file mode 100644 index 00000000..021ad284 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: general_knowledge +include: ../multiple_choice_template_yaml +task: bigbench_general_knowledge_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml b/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml new file mode 100644 index 00000000..cfc2ada2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: geometric_shapes +include: ../multiple_choice_template_yaml +task: bigbench_geometric_shapes_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml b/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml new file mode 100644 index 00000000..e457887f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: goal_step_wikihow +include: ../multiple_choice_template_yaml +task: bigbench_goal_step_wikihow_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml b/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml new file mode 100644 index 00000000..8ec630d5 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: gre_reading_comprehension +include: ../multiple_choice_template_yaml +task: bigbench_gre_reading_comprehension_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml b/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml new file mode 100644 index 00000000..94272e8a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hhh_alignment +include: ../multiple_choice_template_yaml +task: bigbench_hhh_alignment_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml b/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml new file mode 100644 index 00000000..0ab2cecd --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hindi_question_answering +include: ../multiple_choice_template_yaml +task: 
bigbench_hindi_question_answering_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml b/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml new file mode 100644 index 00000000..2d49951b --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hindu_knowledge +include: ../multiple_choice_template_yaml +task: bigbench_hindu_knowledge_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml b/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml new file mode 100644 index 00000000..4c7ca8bd --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hinglish_toxicity +include: ../multiple_choice_template_yaml +task: bigbench_hinglish_toxicity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml b/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml new file mode 100644 index 00000000..d04bccc9 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: human_organs_senses +include: ../multiple_choice_template_yaml +task: bigbench_human_organs_senses_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml b/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml new file mode 100644 index 00000000..9e15ffac --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hyperbaton +include: ../multiple_choice_template_yaml +task: bigbench_hyperbaton_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml b/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml new file mode 100644 index 00000000..dfb75722 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: identify_math_theorems +include: ../multiple_choice_template_yaml +task: bigbench_identify_math_theorems_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml b/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml new file mode 100644 index 00000000..3657d3b0 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: identify_odd_metaphor +include: ../multiple_choice_template_yaml +task: bigbench_identify_odd_metaphor_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml b/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml new file mode 100644 index 00000000..8c2d4c81 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: implicatures +include: ../multiple_choice_template_yaml +task: bigbench_implicatures_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml b/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml new file mode 100644 index 00000000..a837cdf3 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: implicit_relations +include: ../multiple_choice_template_yaml +task: bigbench_implicit_relations_multiple_choice diff --git 
a/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml b/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml new file mode 100644 index 00000000..9d9cb82b --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: intent_recognition +include: ../multiple_choice_template_yaml +task: bigbench_intent_recognition_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml new file mode 100644 index 00000000..715582af --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: international_phonetic_alphabet_nli +include: ../multiple_choice_template_yaml +task: bigbench_international_phonetic_alphabet_nli_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml new file mode 100644 index 00000000..cd6f6f71 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: international_phonetic_alphabet_transliterate +include: ../multiple_choice_template_yaml +task: bigbench_international_phonetic_alphabet_transliterate_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml b/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml new file mode 100644 index 00000000..d6448572 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: intersect_geometry +include: ../multiple_choice_template_yaml +task: bigbench_intersect_geometry_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml b/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml new file mode 100644 index 00000000..bb8385f2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: irony_identification +include: ../multiple_choice_template_yaml +task: bigbench_irony_identification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml b/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml new file mode 100644 index 00000000..7ba101c8 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: kanji_ascii +include: ../multiple_choice_template_yaml +task: bigbench_kanji_ascii_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml b/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml new file mode 100644 index 00000000..e3767b21 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: kannada +include: ../multiple_choice_template_yaml +task: bigbench_kannada_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml b/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml new file mode 100644 index 00000000..88c6bf5e --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: key_value_maps +include: 
../multiple_choice_template_yaml +task: bigbench_key_value_maps_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml b/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml new file mode 100644 index 00000000..de972c64 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: known_unknowns +include: ../multiple_choice_template_yaml +task: bigbench_known_unknowns_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml b/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml new file mode 100644 index 00000000..3e17fd8f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: language_games +include: ../multiple_choice_template_yaml +task: bigbench_language_games_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml b/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml new file mode 100644 index 00000000..e17cdc69 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: language_identification +include: ../multiple_choice_template_yaml +task: bigbench_language_identification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml b/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml new file mode 100644 index 00000000..118de388 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: linguistic_mappings +include: ../multiple_choice_template_yaml +task: bigbench_linguistic_mappings_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml b/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml new file mode 100644 index 00000000..4799e672 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: linguistics_puzzles +include: ../multiple_choice_template_yaml +task: bigbench_linguistics_puzzles_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml b/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml new file mode 100644 index 00000000..f2c94ada --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: list_functions +include: ../multiple_choice_template_yaml +task: bigbench_list_functions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml b/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml new file mode 100644 index 00000000..c24e71ac --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logic_grid_puzzle +include: ../multiple_choice_template_yaml +task: bigbench_logic_grid_puzzle_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml new file mode 100644 index 00000000..11e2771e --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_args +include: ../multiple_choice_template_yaml +task: bigbench_logical_args_multiple_choice diff --git 
a/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml new file mode 100644 index 00000000..0de47251 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_deduction +include: ../multiple_choice_template_yaml +task: bigbench_logical_deduction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml new file mode 100644 index 00000000..b4d68c1b --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_fallacy_detection +include: ../multiple_choice_template_yaml +task: bigbench_logical_fallacy_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml new file mode 100644 index 00000000..e58224b9 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_sequence +include: ../multiple_choice_template_yaml +task: bigbench_logical_sequence_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml b/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml new file mode 100644 index 00000000..316b8eed --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: mathematical_induction +include: ../multiple_choice_template_yaml +task: bigbench_mathematical_induction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml b/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml new file mode 100644 index 00000000..ebbc32f5 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: matrixshapes +include: ../multiple_choice_template_yaml +task: bigbench_matrixshapes_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml b/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml new file mode 100644 index 00000000..2bbe0c00 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: metaphor_boolean +include: ../multiple_choice_template_yaml +task: bigbench_metaphor_boolean_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml new file mode 100644 index 00000000..ae0fab49 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: metaphor_understanding +include: ../multiple_choice_template_yaml +task: bigbench_metaphor_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml new file mode 100644 index 00000000..76b1bac0 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: minute_mysteries_qa +include: ../multiple_choice_template_yaml +task: bigbench_minute_mysteries_qa_multiple_choice diff --git 
a/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml b/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml new file mode 100644 index 00000000..dce2a5c2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: misconceptions +include: ../multiple_choice_template_yaml +task: bigbench_misconceptions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml b/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml new file mode 100644 index 00000000..fca2b324 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: misconceptions_russian +include: ../multiple_choice_template_yaml +task: bigbench_misconceptions_russian_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml b/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml new file mode 100644 index 00000000..ac32701f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: mnist_ascii +include: ../multiple_choice_template_yaml +task: bigbench_mnist_ascii_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml b/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml new file mode 100644 index 00000000..fd5c271a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: modified_arithmetic +include: ../multiple_choice_template_yaml +task: bigbench_modified_arithmetic_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml b/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml new file mode 100644 index 00000000..95414745 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: moral_permissibility +include: ../multiple_choice_template_yaml +task: bigbench_moral_permissibility_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml new file mode 100644 index 00000000..831b261a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: movie_dialog_same_or_different +include: ../multiple_choice_template_yaml +task: bigbench_movie_dialog_same_or_different_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml b/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml new file mode 100644 index 00000000..16d4ea55 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: movie_recommendation +include: ../multiple_choice_template_yaml +task: bigbench_movie_recommendation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml b/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml new file mode 100644 index 00000000..b7693b06 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: mult_data_wrangling +include: ../multiple_choice_template_yaml +task: 
bigbench_mult_data_wrangling_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml b/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml new file mode 100644 index 00000000..8c954b5d --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: multiemo +include: ../multiple_choice_template_yaml +task: bigbench_multiemo_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml b/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml new file mode 100644 index 00000000..78d295c5 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: natural_instructions +include: ../multiple_choice_template_yaml +task: bigbench_natural_instructions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml b/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml new file mode 100644 index 00000000..3bc9f120 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: navigate +include: ../multiple_choice_template_yaml +task: bigbench_navigate_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml b/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml new file mode 100644 index 00000000..7a7b2d80 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: nonsense_words_grammar +include: ../multiple_choice_template_yaml +task: bigbench_nonsense_words_grammar_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml b/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml new file mode 100644 index 00000000..04172c1a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: novel_concepts +include: ../multiple_choice_template_yaml +task: bigbench_novel_concepts_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml b/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml new file mode 100644 index 00000000..c6ab4011 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: object_counting +include: ../multiple_choice_template_yaml +task: bigbench_object_counting_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml b/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml new file mode 100644 index 00000000..82d70a63 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: odd_one_out +include: ../multiple_choice_template_yaml +task: bigbench_odd_one_out_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/operators.yaml b/lm_eval/tasks/bigbench/multiple_choice/operators.yaml new file mode 100644 index 00000000..e48c8005 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/operators.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: operators +include: ../multiple_choice_template_yaml +task: bigbench_operators_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml b/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml new file mode 100644 index 
00000000..3423101a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: paragraph_segmentation +include: ../multiple_choice_template_yaml +task: bigbench_paragraph_segmentation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml new file mode 100644 index 00000000..a2f65cde --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: parsinlu_qa +include: ../multiple_choice_template_yaml +task: bigbench_parsinlu_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml new file mode 100644 index 00000000..3f0f6182 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: parsinlu_reading_comprehension +include: ../multiple_choice_template_yaml +task: bigbench_parsinlu_reading_comprehension_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml b/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml new file mode 100644 index 00000000..ed4945f9 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: penguins_in_a_table +include: ../multiple_choice_template_yaml +task: bigbench_penguins_in_a_table_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml b/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml new file mode 100644 index 00000000..5adb9422 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: periodic_elements +include: ../multiple_choice_template_yaml +task: bigbench_periodic_elements_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml b/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml new file mode 100644 index 00000000..c0ee240f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: persian_idioms +include: ../multiple_choice_template_yaml +task: bigbench_persian_idioms_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml b/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml new file mode 100644 index 00000000..6231e5c0 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: phrase_relatedness +include: ../multiple_choice_template_yaml +task: bigbench_phrase_relatedness_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml b/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml new file mode 100644 index 00000000..50353ac7 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: physical_intuition +include: ../multiple_choice_template_yaml +task: bigbench_physical_intuition_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/physics.yaml b/lm_eval/tasks/bigbench/multiple_choice/physics.yaml new file mode 100644 index 00000000..f3b4244e --- /dev/null +++ 
b/lm_eval/tasks/bigbench/multiple_choice/physics.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: physics +include: ../multiple_choice_template_yaml +task: bigbench_physics_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml b/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml new file mode 100644 index 00000000..ec5e9531 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: physics_questions +include: ../multiple_choice_template_yaml +task: bigbench_physics_questions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml new file mode 100644 index 00000000..a81f33b0 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: play_dialog_same_or_different +include: ../multiple_choice_template_yaml +task: bigbench_play_dialog_same_or_different_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml b/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml new file mode 100644 index 00000000..af82fce2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: polish_sequence_labeling +include: ../multiple_choice_template_yaml +task: bigbench_polish_sequence_labeling_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml b/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml new file mode 100644 index 00000000..83b733a3 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: presuppositions_as_nli +include: ../multiple_choice_template_yaml +task: bigbench_presuppositions_as_nli_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml b/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml new file mode 100644 index 00000000..5f52b44c --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: qa_wikidata +include: ../multiple_choice_template_yaml +task: bigbench_qa_wikidata_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml b/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml new file mode 100644 index 00000000..1b4301bb --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: question_selection +include: ../multiple_choice_template_yaml +task: bigbench_question_selection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml b/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml new file mode 100644 index 00000000..d41cd5dd --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: real_or_fake_text +include: ../multiple_choice_template_yaml +task: bigbench_real_or_fake_text_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml new file mode 100644 index 00000000..e5e6f520 --- 
/dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: reasoning_about_colored_objects +include: ../multiple_choice_template_yaml +task: bigbench_reasoning_about_colored_objects_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml b/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml new file mode 100644 index 00000000..73406e70 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: repeat_copy_logic +include: ../multiple_choice_template_yaml +task: bigbench_repeat_copy_logic_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml b/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml new file mode 100644 index 00000000..b785712c --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: rephrase +include: ../multiple_choice_template_yaml +task: bigbench_rephrase_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml b/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml new file mode 100644 index 00000000..e8aff5b3 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: riddle_sense +include: ../multiple_choice_template_yaml +task: bigbench_riddle_sense_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml b/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml new file mode 100644 index 00000000..7504f388 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: ruin_names +include: ../multiple_choice_template_yaml +task: bigbench_ruin_names_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml new file mode 100644 index 00000000..a462eb15 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: salient_translation_error_detection +include: ../multiple_choice_template_yaml +task: bigbench_salient_translation_error_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml b/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml new file mode 100644 index 00000000..5ea881cd --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: scientific_press_release +include: ../multiple_choice_template_yaml +task: bigbench_scientific_press_release_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml new file mode 100644 index 00000000..886b61be --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: semantic_parsing_in_context_sparc +include: ../multiple_choice_template_yaml +task: bigbench_semantic_parsing_in_context_sparc_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml 
b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml new file mode 100644 index 00000000..cb5dc922 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: semantic_parsing_spider +include: ../multiple_choice_template_yaml +task: bigbench_semantic_parsing_spider_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml b/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml new file mode 100644 index 00000000..573f6199 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: sentence_ambiguity +include: ../multiple_choice_template_yaml +task: bigbench_sentence_ambiguity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml b/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml new file mode 100644 index 00000000..1e0c4ffb --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: similarities_abstraction +include: ../multiple_choice_template_yaml +task: bigbench_similarities_abstraction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml b/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml new file mode 100644 index 00000000..2e453821 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simp_turing_concept +include: ../multiple_choice_template_yaml +task: bigbench_simp_turing_concept_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml new file mode 100644 index 00000000..e5e24f58 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_json +include: ../multiple_choice_template_yaml +task: bigbench_simple_arithmetic_json_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml new file mode 100644 index 00000000..4fb67ac5 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_json_multiple_choice +include: ../multiple_choice_template_yaml +task: bigbench_simple_arithmetic_json_multiple_choice_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml new file mode 100644 index 00000000..67853d68 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_json_subtasks +include: ../multiple_choice_template_yaml +task: bigbench_simple_arithmetic_json_subtasks_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml new file mode 100644 index 00000000..b76bfbde --- /dev/null +++ 
b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_multiple_targets_json +include: ../multiple_choice_template_yaml +task: bigbench_simple_arithmetic_multiple_targets_json_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml new file mode 100644 index 00000000..a8a10ca6 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_ethical_questions +include: ../multiple_choice_template_yaml +task: bigbench_simple_ethical_questions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml new file mode 100644 index 00000000..3bbecfb9 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_text_editing +include: ../multiple_choice_template_yaml +task: bigbench_simple_text_editing_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml b/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml new file mode 100644 index 00000000..4e0b9d3a --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: snarks +include: ../multiple_choice_template_yaml +task: bigbench_snarks_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml b/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml new file mode 100644 index 00000000..de12bcbd --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: social_iqa +include: ../multiple_choice_template_yaml +task: bigbench_social_iqa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml b/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml new file mode 100644 index 00000000..f2e8c795 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: social_support +include: ../multiple_choice_template_yaml +task: bigbench_social_support_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml new file mode 100644 index 00000000..4a3914a4 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: sports_understanding +include: ../multiple_choice_template_yaml +task: bigbench_sports_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml b/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml new file mode 100644 index 00000000..f0882aa2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: strange_stories +include: ../multiple_choice_template_yaml +task: bigbench_strange_stories_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml b/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml new file mode 100644 index 00000000..e99618c0 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml @@ -0,0 +1,4 @@ +# Generated 
by utils.py +dataset_name: strategyqa +include: ../multiple_choice_template_yaml +task: bigbench_strategyqa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml b/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml new file mode 100644 index 00000000..56af1ae2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: sufficient_information +include: ../multiple_choice_template_yaml +task: bigbench_sufficient_information_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml b/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml new file mode 100644 index 00000000..5c6f0cd2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: suicide_risk +include: ../multiple_choice_template_yaml +task: bigbench_suicide_risk_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml new file mode 100644 index 00000000..497980ae --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: swahili_english_proverbs +include: ../multiple_choice_template_yaml +task: bigbench_swahili_english_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml new file mode 100644 index 00000000..46d49ddc --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: swedish_to_german_proverbs +include: ../multiple_choice_template_yaml +task: bigbench_swedish_to_german_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml b/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml new file mode 100644 index 00000000..a6032ad9 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: symbol_interpretation +include: ../multiple_choice_template_yaml +task: bigbench_symbol_interpretation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml b/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml new file mode 100644 index 00000000..4a63b2ac --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: temporal_sequences +include: ../multiple_choice_template_yaml +task: bigbench_temporal_sequences_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/tense.yaml b/lm_eval/tasks/bigbench/multiple_choice/tense.yaml new file mode 100644 index 00000000..4fce296d --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/tense.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: tense +include: ../multiple_choice_template_yaml +task: bigbench_tense_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml b/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml new file mode 100644 index 00000000..550d1190 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: timedial +include: ../multiple_choice_template_yaml 
+task: bigbench_timedial_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml b/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml new file mode 100644 index 00000000..232dc706 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: topical_chat +include: ../multiple_choice_template_yaml +task: bigbench_topical_chat_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml b/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml new file mode 100644 index 00000000..8dd68282 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: tracking_shuffled_objects +include: ../multiple_choice_template_yaml +task: bigbench_tracking_shuffled_objects_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml b/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml new file mode 100644 index 00000000..d85d63b1 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: understanding_fables +include: ../multiple_choice_template_yaml +task: bigbench_understanding_fables_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml b/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml new file mode 100644 index 00000000..0e92a41f --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: undo_permutation +include: ../multiple_choice_template_yaml +task: bigbench_undo_permutation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml b/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml new file mode 100644 index 00000000..b4d421e2 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: unit_conversion +include: ../multiple_choice_template_yaml +task: bigbench_unit_conversion_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml b/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml new file mode 100644 index 00000000..eb60bc42 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: unit_interpretation +include: ../multiple_choice_template_yaml +task: bigbench_unit_interpretation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml b/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml new file mode 100644 index 00000000..47c5b755 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: unnatural_in_context_learning +include: ../multiple_choice_template_yaml +task: bigbench_unnatural_in_context_learning_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml b/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml new file mode 100644 index 00000000..3ddb5e69 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: vitaminc_fact_verification +include: 
../multiple_choice_template_yaml +task: bigbench_vitaminc_fact_verification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml b/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml new file mode 100644 index 00000000..dda9a695 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: what_is_the_tao +include: ../multiple_choice_template_yaml +task: bigbench_what_is_the_tao_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml b/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml new file mode 100644 index 00000000..a6a5bbbf --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: which_wiki_edit +include: ../multiple_choice_template_yaml +task: bigbench_which_wiki_edit_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml b/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml new file mode 100644 index 00000000..0b0a858c --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: winowhy +include: ../multiple_choice_template_yaml +task: bigbench_winowhy_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml b/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml new file mode 100644 index 00000000..c244f547 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: word_sorting +include: ../multiple_choice_template_yaml +task: bigbench_word_sorting_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml b/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml new file mode 100644 index 00000000..a993ef33 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: word_unscrambling +include: ../multiple_choice_template_yaml +task: bigbench_word_unscrambling_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice_template_yaml b/lm_eval/tasks/bigbench/multiple_choice_template_yaml new file mode 100644 index 00000000..3dd2af61 --- /dev/null +++ b/lm_eval/tasks/bigbench/multiple_choice_template_yaml @@ -0,0 +1,10 @@ +group: bigbench +dataset_path: bigbench +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: inputs +doc_to_target: "{{multiple_choice_targets.index(targets[0])}}" +doc_to_choice: "{{multiple_choice_targets}}" +metric_list: + - metric: acc -- GitLab From 892f40a98e1bdc6ee7221b7900d78abd837e3186 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 14:36:33 +0000 Subject: [PATCH 158/264] add comments --- lm_eval/api/task.py | 142 ++++++++++++++++++++++--------------------- lm_eval/evaluator.py | 31 +++++----- 2 files changed, 88 insertions(+), 85 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 687d4263..ac4e1532 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -250,6 +250,11 @@ class Task(abc.ABC): download_mode=download_mode, ) + @property + def config(self): + """Returns the TaskConfig associated with this class.""" + return self._config + @abc.abstractmethod def has_training_docs(self): """Whether the task has a training set""" @@ -352,7 +357,7 @@ class Task(abc.ABC): ), f"Task dataset (path={self.DATASET_PATH}, 
name={self.DATASET_NAME}) must have valid or test docs!" eval_logger.info( - f"Building contexts for task '{self._config.task}' on rank {rank}..." + f"Building contexts for task '{self.config.task}' on rank {rank}..." ) instances = [] @@ -362,14 +367,14 @@ class Task(abc.ABC): # sample fewshot context #TODO: need to offset doc_id by rank now! fewshot_ctx = self.fewshot_context( doc, - self._config.num_fewshot, + self.config.num_fewshot, ) - # TODO: we should override self._config.repeats if doing greedy gen so users don't waste time+compute + # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute inst = self.construct_requests( doc=doc, ctx=fewshot_ctx, - metadata=(self._config["task"], doc_id, self._config.repeats), + metadata=(self.config["task"], doc_id, self.config.repeats), ) if not isinstance(inst, list): @@ -457,9 +462,9 @@ class Task(abc.ABC): if num_fewshot == 0: # always prepend the (possibly empty) task description - labeled_examples = self._config.description + labeled_examples = self.config.description else: - labeled_examples = self._config.description + self.sampler.get_context( + labeled_examples = self.config.description + self.sampler.get_context( doc, num_fewshot ) @@ -469,7 +474,7 @@ class Task(abc.ABC): elif type(example) == list: return [labeled_examples + ex for ex in example] elif type(example) == int: - if self._config.doc_to_choice is not None: + if self.config.doc_to_choice is not None: choices = self.doc_to_choice(doc) return labeled_examples + choices[example] else: @@ -491,7 +496,7 @@ class Task(abc.ABC): """ # TODO: this should only return the overrides applied to a non-YAML task's configuration. # (num_fewshot) - return self._config.to_dict() + return self.config.to_dict() class ConfigurableTask(Task): @@ -506,35 +511,35 @@ class ConfigurableTask(Task): self._config = self.CONFIG # Use new configurations if there was no preconfiguration - if self._config is None: + if self.config is None: self._config = TaskConfig(**config) # Overwrite configs else: if config is not None: self._config.__dict__.update(config) - if self._config is None: + if self.config is None: raise ValueError( "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg" ) - if self._config.output_type is not None: - assert self._config.output_type in ALL_OUTPUT_TYPES - self.OUTPUT_TYPE = self._config.output_type + if self.config.output_type is not None: + assert self.config.output_type in ALL_OUTPUT_TYPES + self.OUTPUT_TYPE = self.config.output_type - if self._config.dataset_path is not None: - self.DATASET_PATH = self._config.dataset_path + if self.config.dataset_path is not None: + self.DATASET_PATH = self.config.dataset_path - if self._config.dataset_name is not None: - self.DATASET_NAME = self._config.dataset_name + if self.config.dataset_name is not None: + self.DATASET_NAME = self.config.dataset_name self._metric_fn_list = {} self._metric_fn_kwargs = {} self._aggregation_list = {} self._higher_is_better = {} - _metric_list = DEFAULT_METRIC_REGISTRY[self._config.output_type] - if self._config.metric_list is None: + _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type] + if self.config.metric_list is None: # TODO: handle this in TaskConfig.__post_init__ ? 
for metric_name in _metric_list: self._metric_fn_list[metric_name] = get_metric(metric_name) @@ -543,7 +548,7 @@ class ConfigurableTask(Task): ) self._higher_is_better[metric_name] = is_higher_better(metric_name) else: - for metric_config in self._config.metric_list: + for metric_config in self.config.metric_list: assert "metric" in metric_config metric_name = metric_config["metric"] kwargs = { @@ -552,7 +557,7 @@ class ConfigurableTask(Task): if key not in ["metric", "aggregation", "higher_is_better"] } - if self._config.process_results is not None: + if self.config.process_results is not None: self._metric_fn_list[metric_name] = None self._metric_fn_kwargs[metric_name] = {} elif callable(metric_name): @@ -594,13 +599,13 @@ class ConfigurableTask(Task): ) self._higher_is_better[metric_name] = is_higher_better(metric_name) - self.download(self._config.dataset_kwargs) + self.download(self.config.dataset_kwargs) self._training_docs = None self._fewshot_docs = None - if self._config.filter_list is not None: + if self.config.filter_list is not None: self._filters = [] - for filter_config in self._config.filter_list: + for filter_config in self.config.filter_list: for filter_pipeline in filter_config: filter_name = filter_config["name"] filter_functions = filter_config["filter"] @@ -615,10 +620,10 @@ class ConfigurableTask(Task): else: self._filters = [build_filter_ensemble("none", [["take_first", None]])] - if self._config.use_prompt is not None: - eval_logger.info(f"loading prompt {self._config.use_prompt}") + if self.config.use_prompt is not None: + eval_logger.info(f"loading prompt {self.config.use_prompt}") self.prompt = get_prompt( - self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME + self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME ) else: self.prompt = None @@ -645,7 +650,7 @@ class ConfigurableTask(Task): test_text = self.doc_to_text(test_doc) test_target = self.doc_to_target(test_doc) - if self._config.doc_to_choice is not None: + if self.config.doc_to_choice is not None: test_choice = self.doc_to_choice(test_doc) if type(test_choice) is not list: eval_logger.error("doc_to_choice must return list") @@ -671,9 +676,9 @@ class ConfigurableTask(Task): check_choices = [test_target] for choice in check_choices: - choice_has_whitespace = True if choice.startswith(" ") or choice.endswith(" ") else False + choice_has_whitespace = True if " " in choice else False delimiter_has_whitespace = ( - True if (self._config.target_delimiter.startswith(" ") or self._config.target_delimiter.endswith(" ")) else False + True if " " in self.config.target_delimiter else False ) if delimiter_has_whitespace and choice_has_whitespace: @@ -693,52 +698,52 @@ class ConfigurableTask(Task): ) def has_training_docs(self) -> bool: - if self._config.training_split is not None: + if self.config.training_split is not None: return True else: return False def has_validation_docs(self) -> bool: - if self._config.validation_split is not None: + if self.config.validation_split is not None: return True else: return False def has_test_docs(self) -> bool: - if self._config.test_split is not None: + if self.config.test_split is not None: return True else: return False def training_docs(self) -> datasets.Dataset: if self.has_training_docs(): - if self._config.process_docs is not None: - return self._config.process_docs( - self.dataset[self._config.training_split] + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.training_split] ) - return 
self.dataset[self._config.training_split] + return self.dataset[self.config.training_split] def validation_docs(self) -> datasets.Dataset: if self.has_validation_docs(): - if self._config.process_docs is not None: - return self._config.process_docs( - self.dataset[self._config.validation_split] + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.validation_split] ) - return self.dataset[self._config.validation_split] + return self.dataset[self.config.validation_split] def test_docs(self) -> datasets.Dataset: if self.has_test_docs(): - if self._config.process_docs is not None: - return self._config.process_docs(self.dataset[self._config.test_split]) - return self.dataset[self._config.test_split] + if self.config.process_docs is not None: + return self.config.process_docs(self.dataset[self.config.test_split]) + return self.dataset[self.config.test_split] def fewshot_docs(self): - if self._config.fewshot_split is not None: - return self.dataset[self._config.fewshot_split] + if self.config.fewshot_split is not None: + return self.dataset[self.config.fewshot_split] else: - if self._config.num_fewshot > 0: + if self.config.num_fewshot > 0: eval_logger.warning( - f"Task '{self._config.task}': " + f"Task '{self.config.task}': " "num_fewshot > 0 but fewshot_split is None. " "using preconfigured rule." ) @@ -754,15 +759,15 @@ class ConfigurableTask(Task): return self._instances def should_decontaminate(self): - return self._config.should_decontaminate + return self.config.should_decontaminate def doc_to_decontamination_query(self, doc): - if self._config.should_decontaminate: - if self._config.doc_to_decontamination_query in self.features: - return doc[self._config.doc_to_decontamination_query] + if self.config.should_decontaminate: + if self.config.doc_to_decontamination_query in self.features: + return doc[self.config.doc_to_decontamination_query] else: return ast.literal_eval( - utils.apply_template(self._config.doc_to_decontamination_query, doc) + utils.apply_template(self.config.doc_to_decontamination_query, doc) ) def _process_doc(self, doc): @@ -780,13 +785,13 @@ class ConfigurableTask(Task): if self.prompt is not None: doc_to_text = self.prompt else: - doc_to_text = self._config.doc_to_text + doc_to_text = self.config.doc_to_text if type(doc_to_text) == int: return doc_to_text elif type(doc_to_text) == str: if doc_to_text in self.features: - # if self._config.doc_to_choice is not None: + # if self.config.doc_to_choice is not None: # return self.doc_to_choice(doc)[doc[doc_to_text]] # else: return doc[doc_to_text] @@ -805,7 +810,7 @@ class ConfigurableTask(Task): return applied_prompt[0] else: eval_logger.warning("Applied prompt returns empty string") - return self._config.fewshot_delimiter + return self.config.fewshot_delimiter else: print(type(doc_to_text)) raise TypeError @@ -814,13 +819,13 @@ class ConfigurableTask(Task): if self.prompt is not None: doc_to_target = self.prompt else: - doc_to_target = self._config.doc_to_target + doc_to_target = self.config.doc_to_target if type(doc_to_target) == int: return doc_to_target elif type(doc_to_target) == str: if doc_to_target in self.features: - # if self._config.doc_to_choice is not None: + # if self.config.doc_to_choice is not None: # return self.doc_to_choice(doc)[doc[doc_to_target]] # else: return doc[doc_to_target] @@ -847,17 +852,17 @@ class ConfigurableTask(Task): return applied_prompt[1] else: eval_logger.warning("Applied prompt returns empty string") - return 
self._config.fewshot_delimiter + return self.config.fewshot_delimiter else: raise TypeError def doc_to_choice(self, doc: Any) -> List[str]: if self.prompt is not None: doc_to_choice = self.prompt - elif self._config.doc_to_choice is None: + elif self.config.doc_to_choice is None: eval_logger.error("doc_to_choice was called but not set in config") else: - doc_to_choice = self._config.doc_to_choice + doc_to_choice = self.config.doc_to_choice if type(doc_to_choice) == str: return ast.literal_eval(utils.apply_template(doc_to_choice, doc)) @@ -878,8 +883,8 @@ class ConfigurableTask(Task): # in multiple_choice tasks, this should be castable to an int corresponding to the index # within the answer choices, while doc_to_target is the string version of {{answer_choices[gold]}}. - if self._config.gold_alias is not None: - doc_to_target = self._config.gold_alias + if self.config.gold_alias is not None: + doc_to_target = self.config.gold_alias else: return self.doc_to_target(doc) @@ -901,7 +906,7 @@ class ConfigurableTask(Task): arguments = (self.doc_to_target(doc),) elif self.OUTPUT_TYPE == "multiple_choice": choices = self.doc_to_choice(doc) - target_delimiter = self._config.target_delimiter + target_delimiter = self.config.target_delimiter if self.multiple_input: # If there are multiple inputs, choices are placed in the ctx cont = self.doc_to_target(doc) @@ -943,15 +948,16 @@ class ConfigurableTask(Task): return request_list elif self.OUTPUT_TYPE == "greedy_until": - arguments = (ctx, self._config.generation_kwargs) + arguments = (ctx, self.config.generation_kwargs) return Instance( request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs ) def process_results(self, doc, results): - if callable(self._config.process_results): - return self._config.process_results(doc, results) + + if callable(self.config.process_results): + return self.config.process_results(doc, results) result_dict = {} use_metric = list(self._metric_fn_list.keys()) @@ -1056,7 +1062,7 @@ class ConfigurableTask(Task): elif self.OUTPUT_TYPE == "greedy_until": gold = self.doc_to_target(doc) - if self._config.doc_to_choice is not None: + if self.config.doc_to_choice is not None: # If you set doc_to_choice, # it assumes that doc_to_target returns a number. choices = self.doc_to_choice(doc) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 515cfd11..820e5bdb 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -218,11 +218,11 @@ def evaluate( # stores the amount to pad out reqs per req. 
type so that # number of fwd passes per distributed rank is equal padding_requests = collections.defaultdict(int) - + # store the hierarchy to do proper ordering task_hierarchy = collections.defaultdict(list) - + # store the ordering of tasks and groups task_order = collections.defaultdict(int) - + # store the aggregation for aggregating across tasks in the same group sample_agg_fn = collections.defaultdict(dict) # get lists of each type of request @@ -437,7 +437,7 @@ def evaluate( task_to_group[task].append(group) else: task_to_group[task] = [group] - + ### Aggregate results over all datapoints ### # aggregate results ; run bootstrap CIs for (task_name, key, metric), items in vals.items(): @@ -459,7 +459,7 @@ def evaluate( results[grouping][metric_key].append(task_score) else: results[grouping][metric_key] = [task_score] - + if sample_metric_key in results[grouping]: results[grouping][sample_metric_key] += items else: @@ -486,36 +486,33 @@ def evaluate( for metric in results[task_or_group].keys(): if type(results[task_or_group][metric]) == list: if "(sample agg)" in metric: - results[task_or_group][metric] = sample_agg_fn[task_or_group][metric](results[task_or_group][metric]) + results[task_or_group][metric] = sample_agg_fn[ + task_or_group + ][metric](results[task_or_group][metric]) else: - results[task_or_group][metric] = np.average(results[task_or_group][metric]) + results[task_or_group][metric] = np.average( + results[task_or_group][metric] + ) versions[task_or_group] = "N/A" - for task_name, task in task_dict.items(): if type(task) == tuple: group_name, task = task order = task_order[group_name] - tabbed_name = "-"*order+group_name + tabbed_name = "-" * order + group_name results_agg[tabbed_name] = results[group_name] versions[tabbed_name] = versions[group_name] if order == 0: groups_agg[group_name] = results[group_name] order = task_order[task_name] - tabbed_name = "-"*order+task_name + tabbed_name = "-" * order + task_name results_agg[tabbed_name] = results[task_name] versions[tabbed_name] = versions[task_name] results_dict = { "results": dict(results_agg.items()), - **( - { - "groups": dict(groups_agg.items()) - } - if bool(groups_agg) - else {} - ), + **({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}), "configs": dict(sorted(configs.items())), "versions": dict(sorted(versions.items())), } -- GitLab From 020abc86b29f2fb772602ed9a2c018c667301fc3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 13 Sep 2023 15:44:21 +0000 Subject: [PATCH 159/264] add qasper in scrolls --- lm_eval/tasks/scrolls/metrics.py | 34 +++++++++++++++++++ .../tasks/scrolls/qasper/boolean_task.yaml | 15 ++++++++ .../tasks/scrolls/qasper/freeform_task.yaml | 14 ++++++++ 3 files changed, 63 insertions(+) create mode 100644 lm_eval/tasks/scrolls/metrics.py create mode 100644 lm_eval/tasks/scrolls/qasper/boolean_task.yaml create mode 100644 lm_eval/tasks/scrolls/qasper/freeform_task.yaml diff --git a/lm_eval/tasks/scrolls/metrics.py b/lm_eval/tasks/scrolls/metrics.py new file mode 100644 index 00000000..adb437bd --- /dev/null +++ b/lm_eval/tasks/scrolls/metrics.py @@ -0,0 +1,34 @@ +import evaluate + +rouge_fn = evaluate.load('rouge') + +def rouge1(predictions, references): + results = rouge_fn.compute(predictions=predictions, references=references) + return results['rouge1'] + +def rouge2(predictions, references): + results = rouge_fn.compute(predictions=predictions, references=references) + return results['rouge2'] + +def rougeL(predictions, references): + results = 
rouge_fn.compute(predictions=predictions, references=references) + return results['rougeL'] + +squad_metric = evaluate.load("squad_v2") + +def agg_f1(samples): + predictions, references = zip(*samples) # unzip, if you will + computed = squad_metric.compute(predictions=predictions, references=references) + return computed["f1"] + + +def _download_metric(): + import os + import shutil + from huggingface_hub import hf_hub_download + scrolls_metric_path = hf_hub_download(repo_id="tau/scrolls", repo_type="dataset", filename="metrics/scrolls.py") + updated_scrolls_metric_path = ( + os.path.dirname(scrolls_metric_path) + os.path.basename(scrolls_metric_path).replace(".", "_") + ".py" + ) + shutil.copy(scrolls_metric_path, updated_scrolls_metric_path) + return updated_scrolls_metric_path \ No newline at end of file diff --git a/lm_eval/tasks/scrolls/qasper/boolean_task.yaml b/lm_eval/tasks/scrolls/qasper/boolean_task.yaml new file mode 100644 index 00000000..f765d1b8 --- /dev/null +++ b/lm_eval/tasks/scrolls/qasper/boolean_task.yaml @@ -0,0 +1,15 @@ +group: scrolls +task: scrolls_qasper_boolean +dataset_path: tau/scrolls +dataset_name: qasper +output_type: multiple_choice +training_split: train +validation_split: validation +process_docs: !function ../preprocessors.process_docs_prepended_question +doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:" +doc_to_target: "{{outputs[0]}}" +doc_to_choice: ["yes", "no"] +should_decontaminate: true +doc_to_decontamination_query: input +metric_list: + - metric: f1 diff --git a/lm_eval/tasks/scrolls/qasper/freeform_task.yaml b/lm_eval/tasks/scrolls/qasper/freeform_task.yaml new file mode 100644 index 00000000..1e3c64a4 --- /dev/null +++ b/lm_eval/tasks/scrolls/qasper/freeform_task.yaml @@ -0,0 +1,14 @@ +group: scrolls +task: scrolls_qasper_freeform +dataset_path: tau/scrolls +dataset_name: qasper +output_type: greedy_until +training_split: train +validation_split: validation +process_docs: !function ../preprocessors.process_docs_prepended_question +doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:" +doc_to_target: "{{outputs[0]}}" +should_decontaminate: true +doc_to_decontamination_query: input +metric_list: + - metric: f1 -- GitLab From bd81b8c0f54e79bdf82887a7083d056dd399d95f Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Thu, 14 Sep 2023 00:37:13 +0500 Subject: [PATCH 160/264] Update device list and dtype detection for MPS --- lm_eval/api/model.py | 26 +++++++++++++++++++++++--- lm_eval/models/huggingface.py | 9 ++++++--- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/lm_eval/api/model.py b/lm_eval/api/model.py index 2bcce8dc..42ef1c74 100644 --- a/lm_eval/api/model.py +++ b/lm_eval/api/model.py @@ -1,7 +1,8 @@ import abc import os -from typing import Union, List, Tuple +import torch +from typing import Union, List, Tuple, Optional, Type, TypeVar from sqlitedict import SqliteDict import json import hashlib @@ -11,6 +12,8 @@ from tqdm import tqdm from lm_eval import utils from lm_eval.logger import eval_logger +T = TypeVar("T", bound="LM") + class LM(abc.ABC): def __init__(self) -> None: @@ -111,11 +114,28 @@ class LM(abc.ABC): pass @classmethod - def create_from_arg_string(cls, arg_string, additional_config=None): + def create_from_arg_string( + cls: Type[T], arg_string: str, additional_config: Optional[dict] = None + ) -> T: + """ + Creates an instance of the LM class using the given argument string and additional config. 
+ + Parameters: + - arg_string: A string containing arguments in the format key1=value1,key2=value2. + - additional_config: Optional dictionary containing additional configuration parameters. + + Returns: + - Instance of the LM class. + """ additional_config = {} if additional_config is None else additional_config args = utils.simple_parse_args_string(arg_string) args2 = {k: v for k, v in additional_config.items() if v is not None} - if args2.get("device") == "mps" or args.get("device") == "mps": + # TODO: delete once float16 MPS is fixed in torch stable + if ( + args2.get("device") in ("mps", "mps:0") + or args.get("device") in ("mps", "mps:0") + and "dev" not in torch.__version__ + ): args["dtype"] = "float32" return cls(**args, **args2) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index e960654d..8f3bd963 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -107,17 +107,20 @@ class HFLM(LM): if not (parallelize or accelerator.num_processes > 1): # use user-passed device device_list = set( - ["cuda", "cpu", "mps"] + ["cuda", "cpu"] + [f"cuda:{i}" for i in range(torch.cuda.device_count())] + + ["mps", "mps:0"] ) if device: if device not in device_list: device = int(device) self._device = torch.device(device) eval_logger.info(f"Using device '{device}'") - if device == "mps": + if device in ("mps", "mps:0") and "dev" not in torch.__version__: eval_logger.info( - "MPS is still in beta and only supports float32; setting dtype to float32." + "MPS: Setting dtype to float32. To use float16 with MPS, please install a nightly build of " + "PyTorch: pip3 install --pre torch torchvision torchaudio --index-url " + "https://download.pytorch.org/whl/nightly/cpu" ) else: eval_logger.info("Device not specified") -- GitLab From 40c9f9cb71926f5802943d007c93e4a52b0a5240 Mon Sep 17 00:00:00 2001 From: Ethan Smith Date: Wed, 13 Sep 2023 20:06:53 -0700 Subject: [PATCH 161/264] Switch to pyproject.toml based project metadata --- pyproject.toml | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++ setup.py | 77 ++------------------------------------------ 2 files changed, 88 insertions(+), 75 deletions(-) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..fa13c8d2 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,86 @@ +[build-system] +requires = ["setuptools>=40.8.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "lm_eval" +version = "1.0.0" +authors = [ + {name="EleutherAI", email="contact@eleuther.ai"} +] +description = "A framework for evaluating language models" +readme = "README.md" +classifiers = [ + "Development Status :: 3 - Alpha", + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +requires-python = ">=3.9" +license = { "text" = "MIT" } +dependencies = [ + "accelerate>=0.21.0", + "evaluate", + "datasets>=2.0.0", + "evaluate>=0.4.0", + "jsonlines", + "numexpr", + "omegaconf>=2.2", + "peft>=0.2.0", + "pybind11>=2.6.2", + "pycountry", + "pytablewriter", + "rouge-score>=0.0.4", + "sacrebleu>=1.5.0", + "scikit-learn>=0.24.1", + "sqlitedict", + "torch>=1.8", + "tqdm-multiprocess", + "transformers>=4.1", + "zstandard", +] + +[tool.setuptools] +packages = ["lm_eval"] + +# required to include yaml files in pip installation +[tool.setuptools.package-data] +lm_eval = ["**/*.yaml", "tasks/**/*"] +examples = ["**/*.yaml"] + +[project.scripts] +lm-eval = "main:main" +lm_eval = "main:main" + 
+[project.urls] +Homepage = "https://github.com/EleutherAI/lm-evaluation-harness" +Repository = "https://github.com/EleutherAI/lm-evaluation-harness" + +[project.optional-dependencies] +dev = ["black", "flake8", "pre-commit", "pytest", "pytest-cov"] +linting = [ + "flake8", + "pylint", + "mypy", + "pre-commit", +] +testing = ["pytest", "pytest-cov", "pytest-xdist"] +multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1"] +sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1", "pycountry"] +promptsource = [ + "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource" +] +gptq = ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"] +anthropic = ["anthropic"] +openai = ["openai", "tiktoken"] +all = [ + "lm_eval[dev]", + "lm_eval[testing]", + "lm_eval[linting]", + "lm_eval[multilingual]", + "lm_eval[sentencepiece]", + "lm_eval[promptsource]", + "lm_eval[gptq]", + "lm_eval[anthropic]", + "lm_eval[openai]" +] diff --git a/setup.py b/setup.py index cf8843ad..dbe4675d 100644 --- a/setup.py +++ b/setup.py @@ -1,77 +1,4 @@ import setuptools -import itertools -with open("README.md", "r", encoding="utf-8") as fh: - long_description = fh.read() - - -extras_require = { - "dev": ["black", "flake8", "pre-commit", "pytest", "pytest-cov"], - "linting": [ - "flake8", - "pylint", - "mypy", - "pre-commit", - ], - "testing": ["pytest", "pytest-cov", "pytest-xdist"], - "multilingual": ["nagisa>=0.2.7", "jieba>=0.42.1"], - "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1"], - "promptsource": [ - "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource" - ], - "gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"], - "anthropic": ["anthropic"], - "openai": ["openai", "tiktoken"], -} -extras_require["all"] = list(itertools.chain.from_iterable(extras_require.values())) - - -setuptools.setup( - name="lm_eval", - version="1.0.0", - author="EleutherAI", - author_email="contact@eleuther.ai", - description="A framework for evaluating language models", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/EleutherAI/lm-evaluation-harness", - packages=setuptools.find_packages(), - # required to include yaml files in pip installation - package_data={ - "lm_eval": ["**/*.yaml", "tasks/**/*"], - "examples": ["**/*.yaml"], - }, - entry_points={ - "console_scripts": ["lm-eval = main:main", "lm_eval = main:main"], - }, - include_package_data=True, - classifiers=[ - "Development Status :: 3 - Alpha", - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - python_requires=">=3.9", - install_requires=[ - "accelerate>=0.18.0", - "evaluate", - "datasets>=2.0.0", - "evaluate>=0.4.0", - "jsonlines", - "numexpr", - "omegaconf>=2.2", - "peft>=0.2.0", - "pybind11>=2.6.2", - "pycountry", - "pytablewriter", - "rouge-score>=0.0.4", - "sacrebleu==1.5.0", - "scikit-learn>=0.24.1", - "sqlitedict", - "torch>=1.8", - "tqdm-multiprocess", - "transformers>=4.1", - "zstandard", - ], - extras_require=extras_require, -) +# This is to make sure that the package supports editable installs +setuptools.setup() -- GitLab From bf038375bcd845c6d366db2d3de42e15ebab4b41 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Thu, 14 Sep 2023 18:25:26 +0500 Subject: [PATCH 162/264] Refactor whitespace check and add handling for multiple_targets --- lm_eval/api/task.py | 32 
++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index d3a4344f..014ef18b 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -674,22 +674,23 @@ class ConfigurableTask(Task): check_choices = test_choice else: check_choices = [test_target] - - for choice in check_choices: - choice_has_whitespace = True if choice.startswith(" ") else False - delimiter_has_whitespace = ( - True if self.config.target_delimiter.startswith(" ") else False - ) - - if delimiter_has_whitespace and choice_has_whitespace: - eval_logger.warning( - f'Both target_delimiter and target choice: "{choice}" have whitespace' - ) - elif (not delimiter_has_whitespace) and (not choice_has_whitespace): - eval_logger.warning( - f'Both target_delimiter and target choice: "{choice}" does not have whitespace, ignore if the language you are evaluating on does not require/use whitespace' + # TODO: is this only for multiple-choice? + if self.config.doc_to_choice is not None: + for choice in check_choices: + choice_has_whitespace = True if choice[0].isspace() else False + delimiter_has_whitespace = ( + True if self.config.target_delimiter[-1].isspace() else False ) + if delimiter_has_whitespace and choice_has_whitespace: + eval_logger.warning( + f'Both target_delimiter and target choice: "{choice}" have whitespace' + ) + elif (not delimiter_has_whitespace) and (not choice_has_whitespace): + eval_logger.warning( + f'Both target_delimiter and target choice: "{choice}" does not have whitespace, ignore if the language you are evaluating on does not require/use whitespace' + ) + def download(self, dataset_kwargs=None) -> None: self.dataset = datasets.load_dataset( path=self.DATASET_PATH, @@ -1067,6 +1068,9 @@ class ConfigurableTask(Task): # it assumes that doc_to_target returns a number. choices = self.doc_to_choice(doc) gold = choices[gold] + # we expect multiple_targets to be a list. 
+ elif self.multiple_target: + pass else: gold = str(gold) -- GitLab From 680c568689389b302da7f32b8cbaeb4996ef4984 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Thu, 14 Sep 2023 18:52:29 +0500 Subject: [PATCH 163/264] change prompt formatting --- lm_eval/tasks/nq_open/nq_open.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lm_eval/tasks/nq_open/nq_open.yaml b/lm_eval/tasks/nq_open/nq_open.yaml index 31d7288a..1a472151 100644 --- a/lm_eval/tasks/nq_open/nq_open.yaml +++ b/lm_eval/tasks/nq_open/nq_open.yaml @@ -3,9 +3,10 @@ dataset_path: nq_open output_type: greedy_until training_split: train validation_split: validation -description: "Answer these questions:\n\n" +description: "Answer these questions:\n" doc_to_text: "Q: {{question}}?\nA:" doc_to_target: "{{answer}}" # TODO: should be multi-target +fewshot_delimiter: "\n" generation_kwargs: until: - "\n" -- GitLab From 0ed5527a376009eb80a5eb249a347d8d01f0d6ec Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 14 Sep 2023 16:40:10 +0200 Subject: [PATCH 164/264] Fix "local variable 'docs' referenced before assignment" error in write_out.py --- scripts/write_out.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/write_out.py b/scripts/write_out.py index 018c063c..d298d49b 100644 --- a/scripts/write_out.py +++ b/scripts/write_out.py @@ -38,13 +38,15 @@ def main(): iters = [] for set in args.sets.split(","): + docs = None if set == "train" and task.has_training_docs(): docs = task.training_docs() if set == "val" and task.has_validation_docs(): docs = task.validation_docs() if set == "test" and task.has_test_docs(): docs = task.test_docs() - iters.append(docs) + if docs is not None: + iters.append(docs) docs = join_iters(iters) -- GitLab From 38c9689ae38cfa503355693f211e0a6ddf88f83e Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 14 Sep 2023 16:47:28 +0200 Subject: [PATCH 165/264] Use Dict to make the code python 3.8 compatible --- lm_eval/prompts/__init__.py | 3 ++- lm_eval/tasks/__init__.py | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index d1048024..afbe30dd 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -1,5 +1,6 @@ import ast +from typing import Dict from lm_eval import utils from lm_eval.logger import eval_logger @@ -7,7 +8,7 @@ from lm_eval.logger import eval_logger # Stores prompts in a dictionary indexed by 2 levels: # prompt category name, and prompt name. 
# This allows us to access prompts -PROMPT_REGISTRY: dict[str, dict[str, str]] = { +PROMPT_REGISTRY: Dict[str, Dict[str, str]] = { "qa-basic": { "question-newline-answer": "Question: {{question}}\nAnswer:", "q-newline-a": "Q: {{question}}\nA:", diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 2f89defe..eaa0fbc4 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -1,6 +1,6 @@ import os import yaml -from typing import List, Union +from typing import List, Union, Dict from lm_eval import utils from lm_eval import prompts @@ -15,7 +15,7 @@ from lm_eval.api.registry import ( ) -def register_configurable_task(config: dict[str, str]) -> int: +def register_configurable_task(config: Dict[str, str]) -> int: SubClass = type( config["task"] + "ConfigurableTask", (ConfigurableTask,), @@ -38,7 +38,7 @@ def register_configurable_task(config: dict[str, str]) -> int: return 0 -def check_prompt_config(config: dict[str, str]) -> List[dict[str, str]]: +def check_prompt_config(config: Dict[str, str]) -> List[Dict[str, str]]: all_configs = [] if "use_prompt" in config: prompt_list = prompts.load_prompt_list( @@ -69,7 +69,7 @@ def check_prompt_config(config: dict[str, str]) -> List[dict[str, str]]: return all_configs -def get_task_name_from_config(task_config: dict[str, str]) -> str: +def get_task_name_from_config(task_config: Dict[str, str]) -> str: if "dataset_name" in task_config: return "{dataset_path}_{dataset_name}".format(**task_config) else: @@ -128,7 +128,7 @@ def get_task_name_from_object(task_object): # TODO: pass num_fewshot and other cmdline overrides in a better way -def get_task_dict(task_name_list: List[Union[str, dict, Task]], **kwargs): +def get_task_dict(task_name_list: List[Union[str, Dict, Task]], **kwargs): config = {**kwargs} -- GitLab From 987476389a34d9fbbc6631de8b7fd9c3cfa2e756 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 14 Sep 2023 15:36:26 +0000 Subject: [PATCH 166/264] remove omegaconf usage --- lm_eval/utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index fe236a74..6cf97331 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -16,7 +16,6 @@ import gc import torch import transformers -from omegaconf import OmegaConf from jinja2 import BaseLoader, Environment, StrictUndefined from itertools import islice @@ -56,7 +55,7 @@ def simple_parse_args_string(args_string): if not args_string: return {} arg_list = args_string.split(",") - args_dict = OmegaConf.to_object(OmegaConf.from_dotlist(arg_list)) + args_dict = {k: v for k, v in [arg.split("=") for arg in arg_list]} return args_dict -- GitLab From b1dd5853022bbd668cdddacf998fcd384417c107 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 14 Sep 2023 15:37:33 +0000 Subject: [PATCH 167/264] fix typo --- lm_eval/tasks/translation/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/translation/utils.py b/lm_eval/tasks/translation/utils.py index 0ed26c2c..aacc1e96 100644 --- a/lm_eval/tasks/translation/utils.py +++ b/lm_eval/tasks/translation/utils.py @@ -10,7 +10,7 @@ try: except ModuleNotFoundError: raise Exception( "`pycountry` is required for generating translation task prompt templates. 
\ -please install pycountry via pip install lm-eval[multilingua] or pip install -e .[multilingual]", +please install pycountry via pip install lm-eval[multilingual] or pip install -e .[multilingual]", ) -- GitLab From da43b3a239854e6b86250ab0fb28c25f276246b4 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 14 Sep 2023 15:37:58 +0000 Subject: [PATCH 168/264] remove stale deps + fix [sentencepiece] dep --- pyproject.toml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fa13c8d2..c85d6946 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,10 +25,8 @@ dependencies = [ "evaluate>=0.4.0", "jsonlines", "numexpr", - "omegaconf>=2.2", "peft>=0.2.0", "pybind11>=2.6.2", - "pycountry", "pytablewriter", "rouge-score>=0.0.4", "sacrebleu>=1.5.0", @@ -65,8 +63,8 @@ linting = [ "pre-commit", ] testing = ["pytest", "pytest-cov", "pytest-xdist"] -multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1"] -sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1", "pycountry"] +multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"] +sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1"] promptsource = [ "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource" ] -- GitLab From 1a34d08498737312a0a8211a32b020b097bb1813 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Fri, 15 Sep 2023 01:51:49 +0500 Subject: [PATCH 169/264] add hendrycks_math tasks; bugfixes --- .pre-commit-config.yaml | 4 +- lm_eval/api/samplers.py | 4 +- lm_eval/api/task.py | 9 +- lm_eval/benchmarks/hendrycks_math.yaml | 9 + lm_eval/tasks/hendrycks_math/README.md | 63 ++++ .../tasks/hendrycks_math/math_algebra.yaml | 21 ++ .../math_counting_and_prob.yaml | 3 + .../tasks/hendrycks_math/math_geometry.yaml | 3 + .../math_intermediate_algebra.yaml | 3 + .../tasks/hendrycks_math/math_num_theory.yaml | 3 + .../tasks/hendrycks_math/math_prealgebra.yaml | 3 + .../tasks/hendrycks_math/math_precalc.yaml | 3 + lm_eval/tasks/hendrycks_math/utils.py | 296 ++++++++++++++++++ 13 files changed, 418 insertions(+), 6 deletions(-) create mode 100644 lm_eval/benchmarks/hendrycks_math.yaml create mode 100644 lm_eval/tasks/hendrycks_math/README.md create mode 100644 lm_eval/tasks/hendrycks_math/math_algebra.yaml create mode 100644 lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml create mode 100644 lm_eval/tasks/hendrycks_math/math_geometry.yaml create mode 100644 lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml create mode 100644 lm_eval/tasks/hendrycks_math/math_num_theory.yaml create mode 100644 lm_eval/tasks/hendrycks_math/math_prealgebra.yaml create mode 100644 lm_eval/tasks/hendrycks_math/math_precalc.yaml create mode 100644 lm_eval/tasks/hendrycks_math/utils.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31ef5ca0..ecf0f31f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,9 @@ repos: - id: codespell exclude: > (?x)^( - .*\.json|ignore.txt + .*\.json + |ignore.txt + |lm_eval/tasks/.* )$ args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt] - repo: https://github.com/pre-commit/mirrors-mypy diff --git a/lm_eval/api/samplers.py b/lm_eval/api/samplers.py index 59e21f07..42b68e2c 100644 --- a/lm_eval/api/samplers.py +++ b/lm_eval/api/samplers.py @@ -46,14 +46,14 @@ class Sampler: ) + self.target_delimiter + ( - self.doc_to_target(doc)[0] + str(self.doc_to_target(doc)[0]) if type(self.doc_to_target(doc)) is list else
self.doc_to_target(doc) if ( self.config.doc_to_choice is None or type(self.doc_to_target(doc)) is str ) - else self.doc_to_choice(doc)[self.doc_to_target(doc)] + else str(self.doc_to_choice(doc)[self.doc_to_target(doc)]) ) for doc in selected_docs ] diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index ac4e1532..d11a9d2c 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -581,7 +581,7 @@ class ConfigurableTask(Task): INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()} metric_agg = get_default_aggregation(metric_name) eval_logger.warning( - f"metric {metric_name} is defined, but aggregation is not. " + f"[Task: {self._config.task}] metric {metric_name} is defined, but aggregation is not. " f"using default " f"aggregation={INV_AGG_REGISTRY[metric_agg]}" ) @@ -593,7 +593,7 @@ class ConfigurableTask(Task): ] else: eval_logger.warning( - f"metric {metric_name} is defined, but higher_is_better is not. " + f"[Task: {self._config.task}] metric {metric_name} is defined, but higher_is_better is not. " f"using default " f"higher_is_better={is_higher_better(metric_name)}" ) @@ -838,7 +838,10 @@ class ConfigurableTask(Task): and (target_string[0] == "[") and (target_string[-1] == "]") ): - return ast.literal_eval(target_string) + try: + return ast.literal_eval(target_string) + except (SyntaxError, ValueError): + return target_string else: return target_string elif type(doc_to_target) == list: diff --git a/lm_eval/benchmarks/hendrycks_math.yaml b/lm_eval/benchmarks/hendrycks_math.yaml new file mode 100644 index 00000000..01ab3381 --- /dev/null +++ b/lm_eval/benchmarks/hendrycks_math.yaml @@ -0,0 +1,9 @@ +group: hendrycks_math +task: + - math_algebra + - math_counting_and_prob + - math_geometry + - math_intermediate_algebra + - math_num_theory + - math_prealgebra + - math_precalc diff --git a/lm_eval/tasks/hendrycks_math/README.md b/lm_eval/tasks/hendrycks_math/README.md new file mode 100644 index 00000000..5d4cd5e9 --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/README.md @@ -0,0 +1,63 @@ +# MATH +ℹ️ This is the 4-shot variant! +## Paper +Measuring Mathematical Problem Solving With the MATH Dataset +https://arxiv.org/abs/2103.03874 + +Many intellectual endeavors require mathematical problem solving, but this skill remains beyond the capabilities of computers. To measure this ability in machine learning models, we introduce MATH, a new dataset of 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations. 
+
+NOTE: The few-shot prompt and the generated answer extraction are based on the [Minerva](https://arxiv.org/abs/2206.14858) paper, and exact-match equivalence is checked with the `sympy` library (a condensed sketch of the check is given under "Answer equivalence" below).
+
+Homepage: https://github.com/hendrycks/math
+
+
+## Citation
+```
+@article{hendrycksmath2021,
+    title={Measuring Mathematical Problem Solving With the MATH Dataset},
+    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
+    journal={NeurIPS},
+    year={2021}
+}
+```
+
+### Groups, Benchmarks and Tasks
+
+#### Benchmarks
+
+- `hendrycks_math`
+
+#### Groups
+
+- `math_word_problems`
+- `greedy_until`
+
+#### Tasks
+
+- `math_algebra`
+- `math_counting_and_prob`
+- `math_geometry`
+- `math_intermediate_algebra`
+- `math_num_theory`
+- `math_prealgebra`
+- `math_precalc`
+
+### Checklist
+
+The checklist is the following:
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+    * The implementation in the original paper is one where the model is first fine-tuned on the data. They do have a few-shot evaluation for GPT-3; however, the few-shot context used here is sourced from [Lewkowycz et al.](https://arxiv.org/abs/2206.14858). The achieved accuracy on Llama-2 models is comparable to that provided in the paper, though not identical.
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
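+### Answer equivalence
+
+As noted above, "exact match" here means symbolic rather than string equality. The check implemented in `utils.py` reduces, roughly, to the sketch below (the helper name `is_equiv_sketch` is illustrative only; the full `is_equiv` additionally wraps the comparison in a 5-second timeout and logs parsing, subtraction, and simplification failures separately, and `parse_latex` requires the `antlr4-python3-runtime` package to be installed):
+
+```python
+import sympy
+from sympy.parsing.latex import parse_latex
+
+
+def is_equiv_sketch(x1: str, x2: str) -> bool:
+    """Return True if two normalized LaTeX answers are symbolically equal."""
+    try:
+        # Parse both answers into sympy expressions and test whether
+        # their difference simplifies to zero.
+        diff = parse_latex(x1) - parse_latex(x2)
+        return sympy.simplify(diff) == 0
+    except Exception:
+        # Answers that cannot be parsed or compared count as non-equivalent.
+        return False
+```
+
+For example, `is_equiv_sketch(r"\frac{1}{2}", "0.5")` evaluates to `True`, so generated answers that differ from the reference only in surface form still score as exact matches.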
+ +### Variant Wishlist + +- [ ] zero-shot variant diff --git a/lm_eval/tasks/hendrycks_math/math_algebra.yaml b/lm_eval/tasks/hendrycks_math/math_algebra.yaml new file mode 100644 index 00000000..bddd5eca --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_algebra.yaml @@ -0,0 +1,21 @@ +group: + - math_word_problems +task: math_algebra +dataset_path: baber/hendrycks_math +process_docs: !function utils.process_docs +dataset_name: algebra +output_type: greedy_until +training_split: train +test_split: test +doc_to_text: !function utils.doc_to_text +process_results: !function utils.process_results +doc_to_target: "{{answer}}" +generation_kwargs: + until: + - "Problem:" + do_sample: false + temperature: 0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml b/lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml new file mode 100644 index 00000000..490e787c --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml @@ -0,0 +1,3 @@ +include: math_algebra.yaml +dataset_name: counting_and_probability +task: math_counting_and_prob diff --git a/lm_eval/tasks/hendrycks_math/math_geometry.yaml b/lm_eval/tasks/hendrycks_math/math_geometry.yaml new file mode 100644 index 00000000..4ab48b74 --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_geometry.yaml @@ -0,0 +1,3 @@ +include: math_algebra.yaml +dataset_name: geometry +task: math_geometry diff --git a/lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml b/lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml new file mode 100644 index 00000000..d554e10a --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml @@ -0,0 +1,3 @@ +include: math_algebra.yaml +dataset_name: intermediate_algebra +task: math_intermediate_algebra diff --git a/lm_eval/tasks/hendrycks_math/math_num_theory.yaml b/lm_eval/tasks/hendrycks_math/math_num_theory.yaml new file mode 100644 index 00000000..05bebf29 --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_num_theory.yaml @@ -0,0 +1,3 @@ +include: math_algebra.yaml +dataset_name: number_theory +task: math_num_theory diff --git a/lm_eval/tasks/hendrycks_math/math_prealgebra.yaml b/lm_eval/tasks/hendrycks_math/math_prealgebra.yaml new file mode 100644 index 00000000..c71e1ef4 --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_prealgebra.yaml @@ -0,0 +1,3 @@ +include: math_algebra.yaml +dataset_name: prealgebra +task: math_prealgebra diff --git a/lm_eval/tasks/hendrycks_math/math_precalc.yaml b/lm_eval/tasks/hendrycks_math/math_precalc.yaml new file mode 100644 index 00000000..4d3d067e --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/math_precalc.yaml @@ -0,0 +1,3 @@ +include: math_algebra.yaml +dataset_name: precalculus +task: math_precalc diff --git a/lm_eval/tasks/hendrycks_math/utils.py b/lm_eval/tasks/hendrycks_math/utils.py new file mode 100644 index 00000000..3f9709a9 --- /dev/null +++ b/lm_eval/tasks/hendrycks_math/utils.py @@ -0,0 +1,296 @@ +import datasets +import re +import sympy +from sympy.parsing.latex import parse_latex +import signal +from lm_eval.logger import eval_logger +from typing import Optional + + +# taken from +# https://github.com/wellecks/lm-evaluation-harness/blob/master/lm_eval/tasks/minerva_math.py +def doc_to_text(doc: dict) -> str: + PROMPT = r"""Problem: +Find the domain of the expression $\frac{\sqrt{x-2}}{\sqrt{5-x}}$.} + +Solution: +The expressions inside each square root must be non-negative. 
Therefore, $x-2 \ge 0$, so $x\ge2$, and $5 - x \ge 0$, so $x \le 5$. Also, the denominator cannot be equal to zero, so $5-x>0$, which gives $x<5$. Therefore, the domain of the expression is $\boxed{[2,5)}$. +Final Answer: The final answer is $[2,5)$. I hope it is correct. + +Problem: +If $\det \mathbf{A} = 2$ and $\det \mathbf{B} = 12,$ then find $\det (\mathbf{A} \mathbf{B}).$ + +Solution: +We have that $\det (\mathbf{A} \mathbf{B}) = (\det \mathbf{A})(\det \mathbf{B}) = (2)(12) = \boxed{24}.$ +Final Answer: The final answer is $24$. I hope it is correct. + +Problem: +Terrell usually lifts two 20-pound weights 12 times. If he uses two 15-pound weights instead, how many times must Terrell lift them in order to lift the same total weight? + +Solution: +If Terrell lifts two 20-pound weights 12 times, he lifts a total of $2\cdot 12\cdot20=480$ pounds of weight. If he lifts two 15-pound weights instead for $n$ times, he will lift a total of $2\cdot15\cdot n=30n$ pounds of weight. Equating this to 480 pounds, we can solve for $n$: +\begin{align*} +30n&=480\\ +\Rightarrow\qquad n&=480/30=\boxed{16} +\end{align*} +Final Answer: The final answer is $16$. I hope it is correct. + +Problem: +If the system of equations + +\begin{align*} +6x-4y&=a,\\ +6y-9x &=b. +\end{align*}has a solution $(x, y)$ where $x$ and $y$ are both nonzero, +find $\frac{a}{b},$ assuming $b$ is nonzero. + +Solution: +If we multiply the first equation by $-\frac{3}{2}$, we obtain + +$$6y-9x=-\frac{3}{2}a.$$Since we also know that $6y-9x=b$, we have + +$$-\frac{3}{2}a=b\Rightarrow\frac{a}{b}=\boxed{-\frac{2}{3}}.$$ +Final Answer: The final answer is $-\frac{2}{3}$. I hope it is correct.""" + + return PROMPT + "\n\n" + "Problem:" + "\n" + doc["problem"] + "\n\n" + "Solution:" + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc: dict) -> dict: + out_doc = { + "problem": doc["problem"], + "solution": doc["solution"], + "answer": normalize_final_answer( + remove_boxed(last_boxed_only_string(doc["solution"])) + ), + } + return out_doc + + return dataset.map(_process_doc) + + +def process_results(doc: dict, results: list[str]) -> dict[str, int]: + candidates = results[0] + + unnormalized_answer = get_unnormalized_answer(candidates) + answer = normalize_final_answer(unnormalized_answer) + + if is_equiv(answer, doc["answer"]): + retval = 1 + else: + retval = 0 + + results = { + "exact_match": retval, + } + return results + + +def last_boxed_only_string(string: str) -> Optional[str]: + idx = string.rfind("\\boxed") + if "\\boxed " in string: + return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0] + if idx < 0: + idx = string.rfind("\\fbox") + if idx < 0: + return None + + i = idx + right_brace_idx = None + num_left_braces_open = 0 + while i < len(string): + if string[i] == "{": + num_left_braces_open += 1 + if string[i] == "}": + num_left_braces_open -= 1 + if num_left_braces_open == 0: + right_brace_idx = i + break + i += 1 + + if right_brace_idx is None: + retval = None + else: + retval = string[idx : right_brace_idx + 1] + + return retval + + +def remove_boxed(s: str) -> str: + if "\\boxed " in s: + left = "\\boxed " + assert s[: len(left)] == left + return s[len(left) :] + + left = "\\boxed{" + + assert s[: len(left)] == left + assert s[-1] == "}" + + return s[len(left) : -1] + + +class timeout: + def __init__(self, seconds=1, error_message="Timeout"): + self.seconds = seconds + self.error_message = error_message + + def handle_timeout(self, signum, frame): + raise 
TimeoutError(self.error_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self.handle_timeout) + signal.alarm(self.seconds) + + def __exit__(self, type, value, traceback): + signal.alarm(0) + + +def is_equiv(x1: str, x2: str) -> bool: + """ + x1 and x2 are normalized latex string + """ + try: + with timeout(seconds=5): + try: + parsed_x1 = parse_latex(x1) + parsed_x2 = parse_latex(x2) + except ( + sympy.parsing.latex.errors.LaTeXParsingError, + sympy.SympifyError, + TypeError, + ): + eval_logger.debug(f"couldn't parse one of {x1} or {x2}") + return False + + try: + diff = parsed_x1 - parsed_x2 + except TypeError: + eval_logger.debug(f"couldn't subtract {x1} and {x2}") + return False + + try: + if sympy.simplify(diff) == 0: + return True + else: + return False + except ValueError: + eval_logger.debug( + f"Had some trouble simplifying when comparing {x1} and {x2}" + ) + except TimeoutError: + eval_logger.debug(f"Timed out comparing {x1} and {x2}") + return False + except Exception as e: + eval_logger.debug(f"Failed comparing {x1} and {x2} with {e}") + return False + + +def get_unnormalized_answer(text: str) -> str: + INVALID_ANSWER = "[invalidanswer]" + end_seq = "I hope it is correct." + text += end_seq + match = re.search( + r"Final Answer: The final answer is(.*?). I hope it is correct.", + text, + ) + if match: + return match.group(1).strip() + else: + return INVALID_ANSWER + + +SUBSTITUTIONS = [ + ("an ", ""), + ("a ", ""), + (".$", "$"), + ("\\$", ""), + (r"\ ", ""), + (" ", ""), + ("mbox", "text"), + (",\\text{and}", ","), + ("\\text{and}", ","), + ("\\text{m}", "\\text{}"), +] +REMOVED_EXPRESSIONS = [ + "square", + "ways", + "integers", + "dollars", + "mph", + "inches", + "ft", + "hours", + "km", + "units", + "\\ldots", + "sue", + "points", + "feet", + "minutes", + "digits", + "cents", + "degrees", + "cm", + "gm", + "pounds", + "meters", + "meals", + "edges", + "students", + "childrentickets", + "multiples", + "\\text{s}", + "\\text{.}", + "\\text{\ns}", + "\\text{}^2", + "\\text{}^3", + "\\text{\n}", + "\\text{}", + r"\mathrm{th}", + r"^\circ", + r"^{\circ}", + r"\;", + r",\!", + "{,}", + '"', + "\\dots", +] + + +def normalize_final_answer(final_answer: str) -> str: + """ + Normalize a final answer to a quantitative reasoning question. + + Copied character for character from appendix D of Lewkowycz et al. (2022) + """ + final_answer = final_answer.split("=")[-1] + + for before, after in SUBSTITUTIONS: + final_answer = final_answer.replace(before, after) + for expr in REMOVED_EXPRESSIONS: + final_answer = final_answer.replace(expr, "") + + # Extract answer that is in LaTeX math, is bold, + # is surrounded by a box, etc. 
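+    # Illustrative walk-through (example input assumed, not taken from the
+    # dataset): the rewrites below turn "$\boxed{16}$" into "$16$" by
+    # unwrapping \boxed{...}, and the later "$"-stripping leaves the bare "16".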
+ final_answer = re.sub(r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$", final_answer) + final_answer = re.sub(r"(\\text\{)(.*?)(\})", "\\2", final_answer) + final_answer = re.sub(r"(\\textbf\{)(.*?)(\})", "\\2", final_answer) + final_answer = re.sub(r"(\\overline\{)(.*?)(\})", "\\2", final_answer) + final_answer = re.sub(r"(\\boxed\{)(.*)(\})", "\\2", final_answer) + + # Normalize shorthand TeX: + # \fracab -> \frac{a}{b} + # \frac{abc}{bef} -> \frac{abc}{bef} + # \fracabc -> \frac{a}{b}c + # \sqrta -> \sqrt{a} + # \sqrtab -> sqrt{a}b + final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer) + final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer) + final_answer = final_answer.replace("$", "") + + # Normalize 100,000 -> 100000 + if final_answer.replace(",", "").isdigit(): + final_answer = final_answer.replace(",", "") + + return final_answer -- GitLab From f6c81476cee118f11e572d610a96db1cd62a5218 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 14 Sep 2023 19:41:45 +0000 Subject: [PATCH 170/264] add python 3.8 to supported versions in pyproject.toml and to CPU unit tests --- .github/workflows/unit_tests.yml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index be9f3fa9..8144488d 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [ "3.9", "3.10", "3.11" ] + python-version: ["3.8", "3.9", "3.10", "3.11" ] timeout-minutes: 30 steps: diff --git a/pyproject.toml b/pyproject.toml index fa13c8d2..7ac5b6b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ] -requires-python = ">=3.9" +requires-python = ">=3.8" license = { "text" = "MIT" } dependencies = [ "accelerate>=0.21.0", -- GitLab From 5794ec3cc41e423a33a1ee31d27c18207ab95510 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Thu, 14 Sep 2023 19:52:08 +0000 Subject: [PATCH 171/264] update python ver. in mypy.ini? --- mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mypy.ini b/mypy.ini index e6c70b0d..76a0c864 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,5 +1,5 @@ [mypy] -python_version = 3.9 +python_version = 3.8 show_traceback = True check_untyped_defs = True no_implicit_reexport = True -- GitLab From 5f205bd557a38c49bcf5c6b6476677ce89afd610 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Fri, 15 Sep 2023 01:51:49 +0500 Subject: [PATCH 172/264] list(gold) if `multiple_target` --- lm_eval/api/task.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 014ef18b..95c08af4 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -674,7 +674,6 @@ class ConfigurableTask(Task): check_choices = test_choice else: check_choices = [test_target] - # TODO: is this only for multiple-choice? if self.config.doc_to_choice is not None: for choice in check_choices: choice_has_whitespace = True if choice[0].isspace() else False @@ -1070,7 +1069,7 @@ class ConfigurableTask(Task): gold = choices[gold] # we expect multiple_targets to be a list. 
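        # (presumably `gold` can arrive here as a non-list sequence, e.g. a
        # datasets column value; list(gold) normalizes it to the plain list of
        # reference answers that the comment above expects.)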
elif self.multiple_target: - pass + gold = list(gold) else: gold = str(gold) -- GitLab From a554e41be7791a3545d9d4b746177ecb6d976ba9 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Fri, 15 Sep 2023 02:06:20 +0500 Subject: [PATCH 173/264] refactored hendrycks_math to minerva_math --- lm_eval/benchmarks/hendrycks_math.yaml | 9 ------- lm_eval/benchmarks/minerva_math.yaml | 9 +++++++ .../math_counting_and_prob.yaml | 3 --- .../tasks/hendrycks_math/math_geometry.yaml | 3 --- .../math_intermediate_algebra.yaml | 3 --- .../tasks/hendrycks_math/math_num_theory.yaml | 3 --- .../tasks/hendrycks_math/math_prealgebra.yaml | 3 --- .../tasks/hendrycks_math/math_precalc.yaml | 3 --- .../README.md | 25 ++++++++++++------- .../minerva_math_algebra.yaml} | 4 +-- .../minerva_math_counting_and_prob.yaml | 3 +++ .../minerva_math/minerva_math_geometry.yaml | 3 +++ .../minerva_math_intermediate_algebra.yaml | 3 +++ .../minerva_math/minerva_math_num_theory.yaml | 3 +++ .../minerva_math/minerva_math_prealgebra.yaml | 3 +++ .../minerva_math/minerva_math_precalc.yaml | 3 +++ .../{hendrycks_math => minerva_math}/utils.py | 0 17 files changed, 45 insertions(+), 38 deletions(-) delete mode 100644 lm_eval/benchmarks/hendrycks_math.yaml create mode 100644 lm_eval/benchmarks/minerva_math.yaml delete mode 100644 lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml delete mode 100644 lm_eval/tasks/hendrycks_math/math_geometry.yaml delete mode 100644 lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml delete mode 100644 lm_eval/tasks/hendrycks_math/math_num_theory.yaml delete mode 100644 lm_eval/tasks/hendrycks_math/math_prealgebra.yaml delete mode 100644 lm_eval/tasks/hendrycks_math/math_precalc.yaml rename lm_eval/tasks/{hendrycks_math => minerva_math}/README.md (74%) rename lm_eval/tasks/{hendrycks_math/math_algebra.yaml => minerva_math/minerva_math_algebra.yaml} (86%) create mode 100644 lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml create mode 100644 lm_eval/tasks/minerva_math/minerva_math_geometry.yaml create mode 100644 lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml create mode 100644 lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml create mode 100644 lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml create mode 100644 lm_eval/tasks/minerva_math/minerva_math_precalc.yaml rename lm_eval/tasks/{hendrycks_math => minerva_math}/utils.py (100%) diff --git a/lm_eval/benchmarks/hendrycks_math.yaml b/lm_eval/benchmarks/hendrycks_math.yaml deleted file mode 100644 index 01ab3381..00000000 --- a/lm_eval/benchmarks/hendrycks_math.yaml +++ /dev/null @@ -1,9 +0,0 @@ -group: hendrycks_math -task: - - math_algebra - - math_counting_and_prob - - math_geometry - - math_intermediate_algebra - - math_num_theory - - math_prealgebra - - math_precalc diff --git a/lm_eval/benchmarks/minerva_math.yaml b/lm_eval/benchmarks/minerva_math.yaml new file mode 100644 index 00000000..6df3203e --- /dev/null +++ b/lm_eval/benchmarks/minerva_math.yaml @@ -0,0 +1,9 @@ +group: minerva_math +task: + - minerva_math_algebra + - minerva_math_counting_and_prob + - minerva_math_geometry + - minerva_math_intermediate_algebra + - minerva_math_num_theory + - minerva_math_prealgebra + - minerva_math_precalc diff --git a/lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml b/lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml deleted file mode 100644 index 490e787c..00000000 --- a/lm_eval/tasks/hendrycks_math/math_counting_and_prob.yaml +++ /dev/null @@ 
-1,3 +0,0 @@ -include: math_algebra.yaml -dataset_name: counting_and_probability -task: math_counting_and_prob diff --git a/lm_eval/tasks/hendrycks_math/math_geometry.yaml b/lm_eval/tasks/hendrycks_math/math_geometry.yaml deleted file mode 100644 index 4ab48b74..00000000 --- a/lm_eval/tasks/hendrycks_math/math_geometry.yaml +++ /dev/null @@ -1,3 +0,0 @@ -include: math_algebra.yaml -dataset_name: geometry -task: math_geometry diff --git a/lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml b/lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml deleted file mode 100644 index d554e10a..00000000 --- a/lm_eval/tasks/hendrycks_math/math_intermediate_algebra.yaml +++ /dev/null @@ -1,3 +0,0 @@ -include: math_algebra.yaml -dataset_name: intermediate_algebra -task: math_intermediate_algebra diff --git a/lm_eval/tasks/hendrycks_math/math_num_theory.yaml b/lm_eval/tasks/hendrycks_math/math_num_theory.yaml deleted file mode 100644 index 05bebf29..00000000 --- a/lm_eval/tasks/hendrycks_math/math_num_theory.yaml +++ /dev/null @@ -1,3 +0,0 @@ -include: math_algebra.yaml -dataset_name: number_theory -task: math_num_theory diff --git a/lm_eval/tasks/hendrycks_math/math_prealgebra.yaml b/lm_eval/tasks/hendrycks_math/math_prealgebra.yaml deleted file mode 100644 index c71e1ef4..00000000 --- a/lm_eval/tasks/hendrycks_math/math_prealgebra.yaml +++ /dev/null @@ -1,3 +0,0 @@ -include: math_algebra.yaml -dataset_name: prealgebra -task: math_prealgebra diff --git a/lm_eval/tasks/hendrycks_math/math_precalc.yaml b/lm_eval/tasks/hendrycks_math/math_precalc.yaml deleted file mode 100644 index 4d3d067e..00000000 --- a/lm_eval/tasks/hendrycks_math/math_precalc.yaml +++ /dev/null @@ -1,3 +0,0 @@ -include: math_algebra.yaml -dataset_name: precalculus -task: math_precalc diff --git a/lm_eval/tasks/hendrycks_math/README.md b/lm_eval/tasks/minerva_math/README.md similarity index 74% rename from lm_eval/tasks/hendrycks_math/README.md rename to lm_eval/tasks/minerva_math/README.md index 5d4cd5e9..2b1daf70 100644 --- a/lm_eval/tasks/hendrycks_math/README.md +++ b/lm_eval/tasks/minerva_math/README.md @@ -6,7 +6,7 @@ https://arxiv.org/abs/2103.03874 Many intellectual endeavors require mathematical problem solving, but this skill remains beyond the capabilities of computers. To measure this ability in machine learning models, we introduce MATH, a new dataset of 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations. 
-NOTE: The few-shot and the generated answer extraction is based on the [Minerva](https://arxiv.org/abs/2206.14858) paper and exact match equivalence is calculated using the `sympy` library +NOTE: The few-shot and the generated answer extraction is based on the [Minerva](https://arxiv.org/abs/2206.14858) and exact match equivalence is calculated using the `sympy` library Homepage: https://github.com/hendrycks/math @@ -19,13 +19,20 @@ Homepage: https://github.com/hendrycks/math journal={NeurIPS}, year={2021} } + +@misc{2206.14858, +Author = {Aitor Lewkowycz and Anders Andreassen and David Dohan and Ethan Dyer and Henryk Michalewski and Vinay Ramasesh and Ambrose Slone and Cem Anil and Imanol Schlag and Theo Gutman-Solo and Yuhuai Wu and Behnam Neyshabur and Guy Gur-Ari and Vedant Misra}, +Title = {Solving Quantitative Reasoning Problems with Language Models}, +Year = {2022}, +Eprint = {arXiv:2206.14858}, +} ``` ### Groups, Benchmarks and Tasks #### Benchmarks -- `hendrycks_math` +- `minerva_math` #### Groups @@ -34,13 +41,13 @@ Homepage: https://github.com/hendrycks/math #### Tasks -- `math_algebra` -- `math_counting_and_prob` -- `math_geometry` -- `math_intermediate_algebra` -- `math_num_theory` -- `math_prealgebra` -- `math_precalc` +- `minerva_math_algebra` +- `minerva_math_counting_and_prob` +- `minerva_math_geometry` +- `minerva_math_intermediate_algebra` +- `minerva_math_num_theory` +- `minerva_math_prealgebra` +- `minerva_math_precalc` ### Checklist diff --git a/lm_eval/tasks/hendrycks_math/math_algebra.yaml b/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml similarity index 86% rename from lm_eval/tasks/hendrycks_math/math_algebra.yaml rename to lm_eval/tasks/minerva_math/minerva_math_algebra.yaml index bddd5eca..3ad3802b 100644 --- a/lm_eval/tasks/hendrycks_math/math_algebra.yaml +++ b/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml @@ -1,7 +1,7 @@ group: - math_word_problems -task: math_algebra -dataset_path: baber/hendrycks_math +task: minerva_math_algebra +dataset_path: EleutherAI/hendrycks_math process_docs: !function utils.process_docs dataset_name: algebra output_type: greedy_until diff --git a/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml b/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml new file mode 100644 index 00000000..688cd711 --- /dev/null +++ b/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: counting_and_probability +task: minerva_math_counting_and_prob diff --git a/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml b/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml new file mode 100644 index 00000000..079ee70e --- /dev/null +++ b/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: geometry +task: minerva_math_geometry diff --git a/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml b/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml new file mode 100644 index 00000000..7b3f063c --- /dev/null +++ b/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: intermediate_algebra +task: minerva_math_intermediate_algebra diff --git a/lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml b/lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml new file mode 100644 index 00000000..44f587bc --- /dev/null +++ b/lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml @@ -0,0 
+1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: number_theory +task: minerva_math_num_theory diff --git a/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml b/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml new file mode 100644 index 00000000..865e2f2c --- /dev/null +++ b/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: prealgebra +task: minerva_math_prealgebra diff --git a/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml b/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml new file mode 100644 index 00000000..06e63abc --- /dev/null +++ b/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: precalculus +task: minerva_math_precalc diff --git a/lm_eval/tasks/hendrycks_math/utils.py b/lm_eval/tasks/minerva_math/utils.py similarity index 100% rename from lm_eval/tasks/hendrycks_math/utils.py rename to lm_eval/tasks/minerva_math/utils.py -- GitLab From 894370df6d7203445c5a952acca0de68d3258325 Mon Sep 17 00:00:00 2001 From: baberabb <92168766+baberabb@users.noreply.github.com> Date: Fri, 15 Sep 2023 02:31:50 +0500 Subject: [PATCH 174/264] make typing 3.8 compatible --- .github/workflows/new_tasks.yml | 4 ++-- tests/test_evaluator.py | 3 ++- tests/utils.py | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml index edafb5b1..ebb28a30 100644 --- a/.github/workflows/new_tasks.yml +++ b/.github/workflows/new_tasks.yml @@ -63,10 +63,10 @@ jobs: - name: Test with pytest # if new tasks are added, run tests on them if: steps.changed-tasks.outputs.tasks_any_modified == 'true' - run: python -m pytest tests/test_tasks.py -s -vv -n=auto + run: python -m pytest tests/test_tasks.py -s -vv # if api is modified, run tests on it - name: Test more tasks with pytest env: API: true if: steps.changed-tasks.outputs.api_any_modified == 'true' - run: python -m pytest tests/test_tasks.py -s -vv -n=auto + run: python -m pytest tests/test_tasks.py -s -vv diff --git a/tests/test_evaluator.py b/tests/test_evaluator.py index 40f38a95..ccd2f850 100644 --- a/tests/test_evaluator.py +++ b/tests/test_evaluator.py @@ -7,6 +7,7 @@ import lm_eval.tasks as tasks # import lm_eval.models as models import lm_eval.api as api import lm_eval.evaluator as evaluator +from typing import List import random import pytest @@ -26,7 +27,7 @@ import pytest ) ], ) -def test_evaluator(task_name: list[str], limit: int, model: str, model_args: str): +def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str): task_name = task_name limit = 10 diff --git a/tests/utils.py b/tests/utils.py index 089bc3ca..3555541e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -9,6 +9,7 @@ import os # This is the path where the output for the changed files for the tasks folder is stored # FILE_PATH = file_path = ".github/outputs/tasks_all_changed_and_modified_files.txt" + # reads a text file and returns a list of words # used to read the output of the changed txt from tj-actions/changed-files def load_changed_files(file_path: str) -> List[str]: @@ -32,7 +33,7 @@ def parser(full_path: List[str]) -> List[str]: return list(_output) -def new_tasks() -> Union[list[str], None]: +def new_tasks() -> Union[List[str], None]: FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt" if os.path.exists(FILENAME): # If tasks folder has changed then we get the list of files from FILENAME -- GitLab 
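The `list[str]` -> `List[str]` edits in the patch above exist because PEP 585 built-in generics only became subscriptable in Python 3.9; on the newly supported 3.8, such an annotation fails as soon as it is evaluated. A minimal sketch of the failure mode (function and variable names here are illustrative, not from the codebase):

```python
from typing import List


def parse_tasks(spec: str) -> List[str]:  # portable form, fine on Python 3.8+
    return [s for s in spec.split(",") if s]


# The built-in generic spelling breaks on 3.8 at definition time, because the
# annotation `list[str]` is evaluated eagerly:
#
#   def parse_tasks(spec: str) -> list[str]: ...
#   TypeError: 'type' object is not subscriptable
#
# `from __future__ import annotations` (PEP 563) would also defer evaluation
# and avoid the error; these patches instead standardize on `typing.List`.
```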
From e4bdffd024ad38577a60c9acc62d0ba20a2314ab Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 14:20:59 +0000 Subject: [PATCH 175/264] add first_n sampler --- lm_eval/api/samplers.py | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/lm_eval/api/samplers.py b/lm_eval/api/samplers.py index 59e21f07..6b3af4e9 100644 --- a/lm_eval/api/samplers.py +++ b/lm_eval/api/samplers.py @@ -1,4 +1,4 @@ -class Sampler: +class ContextSampler: def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None: self.rnd = rnd assert self.rnd, "must pass rnd to FewShotSampler!" @@ -71,7 +71,19 @@ class Sampler: return self.rnd.sample(self.docs, n) -class BalancedSampler(Sampler): +class FirstNSampler(ContextSampler): + def sample(self, n) -> None: + """ + Draw the first `n` samples in order from the specified split. + Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU. + """ + assert n <= len( + self.docs + ), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available." + return self.docs[:n] + + +class BalancedSampler(ContextSampler): def sample(self, n) -> None: """ TODO: this should return approximately class-balanced samples from our fewshot examples. @@ -81,12 +93,27 @@ class BalancedSampler(Sampler): pass -class ManualSampler(Sampler): +class ManualSampler(ContextSampler): def sample(self, n) -> None: """ """ pass +SAMPLER_REGISTRY = { + "default": ContextSampler, + "first_n": FirstNSampler, +} + + +def get_sampler(name): + try: + return SAMPLER_REGISTRY[name] + except KeyError: + raise ValueError( + f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}" + ) + + # TODO: how should we do design here? might be better to have a single sampler and pass more kwargs at init. # Depends what's easier for new user to add own functionality on top of -- GitLab From 624fb20752c9d51de9762715377d6c7d3b8b9281 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 14:29:46 +0000 Subject: [PATCH 176/264] reenable bootstrapping --- lm_eval/evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 2975f9d9..b4151146 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -470,10 +470,10 @@ def evaluate( # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap # so we run them less iterations. 
still looking for a cleaner way to do this - if False: # bootstrap_iters > 0: + if bootstrap_iters > 0: stderr = lm_eval.api.metrics.stderr_for_metric( metric=task.aggregation()[metric], - bootstrap_iters=min(bootstrap_iters, 1000) + bootstrap_iters=min(bootstrap_iters, 100) if metric in ["bleu", "chrf", "ter"] else bootstrap_iters, ) -- GitLab From 9d0df41bd02d8d2252300646de86ffd232621c69 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 14:30:11 +0000 Subject: [PATCH 177/264] add fewshot_config field --- lm_eval/api/task.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 95c08af4..28711368 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -75,6 +75,7 @@ class TaskConfig(dict): description: str = "" target_delimiter: str = " " fewshot_delimiter: str = "\n\n" + fewshot_config: dict = None # runtime configuration options num_fewshot: int = 0 # scoring options @@ -629,9 +630,9 @@ class ConfigurableTask(Task): self.prompt = None if self.fewshot_docs() is not None: - self.sampler = samplers.Sampler( - list(self.fewshot_docs()), self, rnd=random.Random(1234) - ) + self.sampler = samplers.get_sampler( + self.config.fewshot_config.get("sampler", "default") + )(list(self.fewshot_docs()), self, rnd=random.Random(1234)) if self.has_test_docs(): self.task_docs = self.test_docs() -- GitLab From 13c6f5e74ed51ca301b4dac82de0422779751439 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 16:16:38 +0000 Subject: [PATCH 178/264] add draft cmmlu port --- .pre-commit-config.yaml | 2 +- lm_eval/tasks/cmmlu/README.md | 173 ++++++++++++++++++ lm_eval/tasks/cmmlu/_default_template_yaml | 17 ++ lm_eval/tasks/cmmlu/_generate_configs.py | 131 +++++++++++++ .../tasks/cmmlu/cmmlu_default_agronomy.yaml | 4 + .../tasks/cmmlu/cmmlu_default_anatomy.yaml | 4 + .../cmmlu/cmmlu_default_ancient_chinese.yaml | 4 + lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml | 4 + .../tasks/cmmlu/cmmlu_default_astronomy.yaml | 4 + .../cmmlu/cmmlu_default_business_ethics.yaml | 4 + ...lu_default_chinese_civil_service_exam.yaml | 4 + .../cmmlu_default_chinese_driving_rule.yaml | 4 + .../cmmlu_default_chinese_food_culture.yaml | 4 + .../cmmlu_default_chinese_foreign_policy.yaml | 4 + .../cmmlu/cmmlu_default_chinese_history.yaml | 4 + .../cmmlu_default_chinese_literature.yaml | 4 + ...default_chinese_teacher_qualification.yaml | 4 + .../cmmlu_default_clinical_knowledge.yaml | 4 + ...mlu_default_college_actuarial_science.yaml | 4 + .../cmmlu_default_college_education.yaml | 4 + ...default_college_engineering_hydrology.yaml | 4 + .../cmmlu/cmmlu_default_college_law.yaml | 4 + .../cmmlu_default_college_mathematics.yaml | 4 + ...lu_default_college_medical_statistics.yaml | 4 + .../cmmlu/cmmlu_default_college_medicine.yaml | 4 + .../cmmlu/cmmlu_default_computer_science.yaml | 4 + .../cmmlu_default_computer_security.yaml | 4 + .../cmmlu_default_conceptual_physics.yaml | 4 + ...fault_construction_project_management.yaml | 4 + .../tasks/cmmlu/cmmlu_default_economics.yaml | 4 + .../tasks/cmmlu/cmmlu_default_education.yaml | 4 + .../cmmlu_default_electrical_engineering.yaml | 4 + .../cmmlu_default_elementary_chinese.yaml | 4 + .../cmmlu_default_elementary_commonsense.yaml | 4 + ...elementary_information_and_technology.yaml | 4 + .../cmmlu_default_elementary_mathematics.yaml | 4 + .../tasks/cmmlu/cmmlu_default_ethnology.yaml | 4 + .../cmmlu/cmmlu_default_food_science.yaml | 4 + .../tasks/cmmlu/cmmlu_default_genetics.yaml | 4 + 
.../cmmlu/cmmlu_default_global_facts.yaml | 4 + .../cmmlu_default_high_school_biology.yaml | 4 + .../cmmlu_default_high_school_chemistry.yaml | 4 + .../cmmlu_default_high_school_geography.yaml | 4 + ...cmmlu_default_high_school_mathematics.yaml | 4 + .../cmmlu_default_high_school_physics.yaml | 4 + .../cmmlu_default_high_school_politics.yaml | 4 + .../cmmlu/cmmlu_default_human_sexuality.yaml | 4 + .../cmmlu_default_international_law.yaml | 4 + .../tasks/cmmlu/cmmlu_default_journalism.yaml | 4 + .../cmmlu/cmmlu_default_jurisprudence.yaml | 4 + .../cmmlu_default_legal_and_moral_basis.yaml | 4 + .../tasks/cmmlu/cmmlu_default_logical.yaml | 4 + .../cmmlu/cmmlu_default_machine_learning.yaml | 4 + .../tasks/cmmlu/cmmlu_default_management.yaml | 4 + .../tasks/cmmlu/cmmlu_default_marketing.yaml | 4 + .../cmmlu/cmmlu_default_marxist_theory.yaml | 4 + .../cmmlu/cmmlu_default_modern_chinese.yaml | 4 + .../tasks/cmmlu/cmmlu_default_nutrition.yaml | 4 + .../tasks/cmmlu/cmmlu_default_philosophy.yaml | 4 + ...cmmlu_default_professional_accounting.yaml | 4 + .../cmmlu/cmmlu_default_professional_law.yaml | 4 + .../cmmlu_default_professional_medicine.yaml | 4 + ...cmmlu_default_professional_psychology.yaml | 4 + .../cmmlu/cmmlu_default_public_relations.yaml | 4 + .../cmmlu/cmmlu_default_security_study.yaml | 4 + .../tasks/cmmlu/cmmlu_default_sociology.yaml | 4 + .../cmmlu/cmmlu_default_sports_science.yaml | 4 + ..._default_traditional_chinese_medicine.yaml | 4 + .../tasks/cmmlu/cmmlu_default_virology.yaml | 4 + .../cmmlu/cmmlu_default_world_history.yaml | 4 + .../cmmlu/cmmlu_default_world_religions.yaml | 4 + 71 files changed, 590 insertions(+), 1 deletion(-) create mode 100644 lm_eval/tasks/cmmlu/README.md create mode 100644 lm_eval/tasks/cmmlu/_default_template_yaml create mode 100644 lm_eval/tasks/cmmlu/_generate_configs.py create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml create mode 100644 
lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_education.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_management.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml create mode 100644 lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31ef5ca0..9d5d3309 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: - 
id: codespell exclude: > (?x)^( - .*\.json|ignore.txt + .*\.json|ignore.txt|.*yaml )$ args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt] - repo: https://github.com/pre-commit/mirrors-mypy diff --git a/lm_eval/tasks/cmmlu/README.md b/lm_eval/tasks/cmmlu/README.md new file mode 100644 index 00000000..27596ee8 --- /dev/null +++ b/lm_eval/tasks/cmmlu/README.md @@ -0,0 +1,173 @@ +# CMMLU + +### Paper + +CMMLU: Measuring massive multitask language understanding in Chinese +https://arxiv.org/abs/2306.09212 + +CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture. +CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels. + +Homepage: https://github.com/haonan-li/CMMLU + +### Citation + +```bibtex +@misc{li2023cmmlu, + title={CMMLU: Measuring massive multitask language understanding in Chinese}, + author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin}, + year={2023}, + eprint={2306.09212}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + + +| Tasks |Version|Filter| Metric |Value | |Stderr| +|--------------------------------------------|-------|------|--------------------|-----:|---|-----:| +|cmmlu |N/A |none |acc |0.2480| | | +| | |none |acc(sample agg) |0.2494| | | +| | |none |acc_norm |0.2480| | | +| | |none |acc_norm(sample agg)|0.2494| | | +|-cmmlu_modern_chinese |Yaml |none |acc |0.2500|± |0.0404| +| | |none |acc_norm |0.2500|± |0.0404| +|-cmmlu_world_history |Yaml |none |acc |0.2484|± |0.0342| +| | |none |acc_norm |0.2484|± |0.0342| +|-cmmlu_college_education |Yaml |none |acc |0.2523|± |0.0422| +| | |none |acc_norm |0.2523|± |0.0422| +|-cmmlu_international_law |Yaml |none |acc |0.2486|± |0.0319| +| | |none |acc_norm |0.2486|± |0.0319| +|-cmmlu_philosophy |Yaml |none |acc |0.1905|± |0.0385| +| | |none |acc_norm |0.1905|± |0.0385| +|-cmmlu_professional_psychology |Yaml |none |acc |0.2457|± |0.0283| +| | |none |acc_norm |0.2457|± |0.0283| +|-cmmlu_college_engineering_hydrology |Yaml |none |acc |0.2830|± |0.0440| +| | |none |acc_norm |0.2830|± |0.0440| +|-cmmlu_electrical_engineering |Yaml |none |acc |0.2442|± |0.0329| +| | |none |acc_norm |0.2442|± |0.0329| +|-cmmlu_ancient_chinese |Yaml |none |acc |0.2378|± |0.0333| +| | |none |acc_norm |0.2378|± |0.0333| +|-cmmlu_chinese_food_culture |Yaml |none |acc |0.2353|± |0.0365| +| | |none |acc_norm |0.2353|± |0.0365| +|-cmmlu_chinese_literature |Yaml |none |acc |0.2598|± |0.0308| +| | |none |acc_norm |0.2598|± |0.0308| +|-cmmlu_legal_and_moral_basis |Yaml |none |acc |0.2477|± |0.0296| +| | |none |acc_norm |0.2477|± |0.0296| +|-cmmlu_construction_project_management |Yaml |none |acc |0.2374|± |0.0362| +| | |none |acc_norm |0.2374|± |0.0362| +|-cmmlu_ethnology |Yaml |none |acc |0.2519|± |0.0375| +| | |none |acc_norm |0.2519|± |0.0375| +|-cmmlu_high_school_geography |Yaml |none |acc |0.2542|± |0.0403| +| | |none |acc_norm |0.2542|± |0.0403| +|-cmmlu_professional_medicine |Yaml |none |acc |0.2500|± |0.0224| +| | |none |acc_norm |0.2500|± |0.0224| +|-cmmlu_global_facts |Yaml |none |acc |0.2349|± |0.0348| +| | |none |acc_norm |0.2349|± |0.0348| +|-cmmlu_astronomy |Yaml |none |acc |0.2303|± |0.0329| +| | |none |acc_norm |0.2303|± |0.0329| +|-cmmlu_machine_learning |Yaml |none |acc |0.2541|± |0.0396| +| | |none |acc_norm |0.2541|± |0.0396| +|-cmmlu_high_school_politics |Yaml |none 
|acc |0.2378|± |0.0357| +| | |none |acc_norm |0.2378|± |0.0357| +|-cmmlu_chinese_civil_service_exam |Yaml |none |acc |0.2562|± |0.0346| +| | |none |acc_norm |0.2562|± |0.0346| +|-cmmlu_professional_law |Yaml |none |acc |0.2512|± |0.0299| +| | |none |acc_norm |0.2512|± |0.0299| +|-cmmlu_college_medical_statistics |Yaml |none |acc |0.2453|± |0.0420| +| | |none |acc_norm |0.2453|± |0.0420| +|-cmmlu_computer_security |Yaml |none |acc |0.2573|± |0.0335| +| | |none |acc_norm |0.2573|± |0.0335| +|-cmmlu_food_science |Yaml |none |acc |0.2238|± |0.0350| +| | |none |acc_norm |0.2238|± |0.0350| +|-cmmlu_security_study |Yaml |none |acc |0.2519|± |0.0375| +| | |none |acc_norm |0.2519|± |0.0375| +|-cmmlu_high_school_physics |Yaml |none |acc |0.2545|± |0.0417| +| | |none |acc_norm |0.2545|± |0.0417| +|-cmmlu_management |Yaml |none |acc |0.2476|± |0.0299| +| | |none |acc_norm |0.2476|± |0.0299| +|-cmmlu_professional_accounting |Yaml |none |acc |0.2514|± |0.0329| +| | |none |acc_norm |0.2514|± |0.0329| +|-cmmlu_human_sexuality |Yaml |none |acc |0.2222|± |0.0372| +| | |none |acc_norm |0.2222|± |0.0372| +|-cmmlu_marxist_theory |Yaml |none |acc |0.2487|± |0.0315| +| | |none |acc_norm |0.2487|± |0.0315| +|-cmmlu_agronomy |Yaml |none |acc |0.2426|± |0.0331| +| | |none |acc_norm |0.2426|± |0.0331| +|-cmmlu_chinese_teacher_qualification |Yaml |none |acc |0.2626|± |0.0330| +| | |none |acc_norm |0.2626|± |0.0330| +|-cmmlu_genetics |Yaml |none |acc |0.2273|± |0.0317| +| | |none |acc_norm |0.2273|± |0.0317| +|-cmmlu_sports_science |Yaml |none |acc |0.2727|± |0.0348| +| | |none |acc_norm |0.2727|± |0.0348| +|-cmmlu_elementary_commonsense |Yaml |none |acc |0.2424|± |0.0305| +| | |none |acc_norm |0.2424|± |0.0305| +|-cmmlu_logical |Yaml |none |acc |0.1951|± |0.0359| +| | |none |acc_norm |0.1951|± |0.0359| +|-cmmlu_chinese_history |Yaml |none |acc |0.2508|± |0.0242| +| | |none |acc_norm |0.2508|± |0.0242| +|-cmmlu_traditional_chinese_medicine |Yaml |none |acc |0.2378|± |0.0314| +| | |none |acc_norm |0.2378|± |0.0314| +|-cmmlu_elementary_mathematics |Yaml |none |acc |0.2609|± |0.0290| +| | |none |acc_norm |0.2609|± |0.0290| +|-cmmlu_nutrition |Yaml |none |acc |0.2552|± |0.0363| +| | |none |acc_norm |0.2552|± |0.0363| +|-cmmlu_chinese_foreign_policy |Yaml |none |acc |0.1776|± |0.0371| +| | |none |acc_norm |0.1776|± |0.0371| +|-cmmlu_journalism |Yaml |none |acc |0.2616|± |0.0336| +| | |none |acc_norm |0.2616|± |0.0336| +|-cmmlu_jurisprudence |Yaml |none |acc |0.2506|± |0.0214| +| | |none |acc_norm |0.2506|± |0.0214| +|-cmmlu_sociology |Yaml |none |acc |0.2478|± |0.0288| +| | |none |acc_norm |0.2478|± |0.0288| +|-cmmlu_college_mathematics |Yaml |none |acc |0.2190|± |0.0406| +| | |none |acc_norm |0.2190|± |0.0406| +|-cmmlu_computer_science |Yaml |none |acc |0.2549|± |0.0306| +| | |none |acc_norm |0.2549|± |0.0306| +|-cmmlu_conceptual_physics |Yaml |none |acc |0.2517|± |0.0359| +| | |none |acc_norm |0.2517|± |0.0359| +|-cmmlu_elementary_chinese |Yaml |none |acc |0.2817|± |0.0284| +| | |none |acc_norm |0.2817|± |0.0284| +|-cmmlu_marketing |Yaml |none |acc |0.2500|± |0.0324| +| | |none |acc_norm |0.2500|± |0.0324| +|-cmmlu_high_school_chemistry |Yaml |none |acc |0.2576|± |0.0382| +| | |none |acc_norm |0.2576|± |0.0382| +|-cmmlu_college_law |Yaml |none |acc |0.2315|± |0.0408| +| | |none |acc_norm |0.2315|± |0.0408| +|-cmmlu_chinese_driving_rule |Yaml |none |acc |0.2595|± |0.0384| +| | |none |acc_norm |0.2595|± |0.0384| +|-cmmlu_clinical_knowledge |Yaml |none |acc |0.2532|± |0.0283| +| | |none |acc_norm |0.2532|± |0.0283| 
+|-cmmlu_education |Yaml |none |acc |0.2761|± |0.0351| +| | |none |acc_norm |0.2761|± |0.0351| +|-cmmlu_high_school_mathematics |Yaml |none |acc |0.2927|± |0.0356| +| | |none |acc_norm |0.2927|± |0.0356| +|-cmmlu_college_actuarial_science |Yaml |none |acc |0.2736|± |0.0435| +| | |none |acc_norm |0.2736|± |0.0435| +|-cmmlu_arts |Yaml |none |acc |0.2313|± |0.0334| +| | |none |acc_norm |0.2313|± |0.0334| +|-cmmlu_public_relations |Yaml |none |acc |0.2471|± |0.0328| +| | |none |acc_norm |0.2471|± |0.0328| +|-cmmlu_college_medicine |Yaml |none |acc |0.2418|± |0.0260| +| | |none |acc_norm |0.2418|± |0.0260| +|-cmmlu_economics |Yaml |none |acc |0.2453|± |0.0342| +| | |none |acc_norm |0.2453|± |0.0342| +|-cmmlu_elementary_information_and_technology|Yaml |none |acc |0.2731|± |0.0289| +| | |none |acc_norm |0.2731|± |0.0289| +|-cmmlu_anatomy |Yaml |none |acc |0.2432|± |0.0354| +| | |none |acc_norm |0.2432|± |0.0354| +|-cmmlu_world_religions |Yaml |none |acc |0.2875|± |0.0359| +| | |none |acc_norm |0.2875|± |0.0359| +|-cmmlu_virology |Yaml |none |acc |0.2485|± |0.0333| +| | |none |acc_norm |0.2485|± |0.0333| +|-cmmlu_high_school_biology |Yaml |none |acc |0.2485|± |0.0333| +| | |none |acc_norm |0.2485|± |0.0333| +|-cmmlu_business_ethics |Yaml |none |acc |0.2584|± |0.0304| +| | |none |acc_norm |0.2584|± |0.0304| + +|Groups|Version|Filter| Metric |Value | |Stderr| +|------|-------|------|--------------------|-----:|---|------| +|cmmlu |N/A |none |acc |0.2480| | | +| | |none |acc(sample agg) |0.2494| | | +| | |none |acc_norm |0.2480| | | +| | |none |acc_norm(sample agg)|0.2494| | | diff --git a/lm_eval/tasks/cmmlu/_default_template_yaml b/lm_eval/tasks/cmmlu/_default_template_yaml new file mode 100644 index 00000000..198e60f2 --- /dev/null +++ b/lm_eval/tasks/cmmlu/_default_template_yaml @@ -0,0 +1,17 @@ +group: cmmlu +dataset_path: haonan-li/cmmlu +test_split: test +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. 
{{D}}\n答案:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/cmmlu/_generate_configs.py b/lm_eval/tasks/cmmlu/_generate_configs.py new file mode 100644 index 00000000..4b3dba75 --- /dev/null +++ b/lm_eval/tasks/cmmlu/_generate_configs.py @@ -0,0 +1,131 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" +import os +import yaml +import argparse + +from tqdm import tqdm + +from lm_eval.logger import eval_logger + +SUBJECTS = { + "agronomy": "农学", + "anatomy": "解剖学", + "ancient_chinese": "古汉语", + "arts": "艺术学", + "astronomy": "天文学", + "business_ethics": "商业伦理", + "chinese_civil_service_exam": "中国公务员考试", + "chinese_driving_rule": "中国驾驶规则", + "chinese_food_culture": "中国饮食文化", + "chinese_foreign_policy": "中国外交政策", + "chinese_history": "中国历史", + "chinese_literature": "中国文学", + "chinese_teacher_qualification": "中国教师资格", + "clinical_knowledge": "临床知识", + "college_actuarial_science": "大学精算学", + "college_education": "大学教育学", + "college_engineering_hydrology": "大学工程水文学", + "college_law": "大学法律", + "college_mathematics": "大学数学", + "college_medical_statistics": "大学医学统计", + "college_medicine": "大学医学", + "computer_science": "计算机科学", + "computer_security": "计算机安全", + "conceptual_physics": "概念物理学", + "construction_project_management": "建设工程管理", + "economics": "经济学", + "education": "教育学", + "electrical_engineering": "电气工程", + "elementary_chinese": "小学语文", + "elementary_commonsense": "小学常识", + "elementary_information_and_technology": "小学信息技术", + "elementary_mathematics": "初等数学", + "ethnology": "民族学", + "food_science": "食品科学", + "genetics": "遗传学", + "global_facts": "全球事实", + "high_school_biology": "高中生物", + "high_school_chemistry": "高中化学", + "high_school_geography": "高中地理", + "high_school_mathematics": "高中数学", + "high_school_physics": "高中物理学", + "high_school_politics": "高中政治", + "human_sexuality": "人类性行为", + "international_law": "国际法学", + "journalism": "新闻学", + "jurisprudence": "法理学", + "legal_and_moral_basis": "法律与道德基础", + "logical": "逻辑学", + "machine_learning": "机器学习", + "management": "管理学", + "marketing": "市场营销", + "marxist_theory": "马克思主义理论", + "modern_chinese": "现代汉语", + "nutrition": "营养学", + "philosophy": "哲学", + "professional_accounting": "专业会计", + "professional_law": "专业法学", + "professional_medicine": "专业医学", + "professional_psychology": "专业心理学", + "public_relations": "公共关系", + "security_study": "安全研究", + "sociology": "社会学", + "sports_science": "体育学", + "traditional_chinese_medicine": "中医中药", + "virology": "病毒学", + "world_history": "世界历史", + "world_religions": "世界宗教", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="cmmlu") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. 
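+    # e.g. --base_yaml_path lm_eval/tasks/cmmlu/_default_template_yaml
+    # gives base_yaml_name == "_default_template_yaml", which is what the
+    # generated per-subject files reference via their "include" key.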
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path) as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path) as f: + cot_file = json.load(f) + + for subject_eng, subject_zh in tqdm(SUBJECTS.items()): + if args.cot_prompt_path is not None: + description = cot_file[subject_eng] + else: + description = f"以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n" + + yaml_dict = { + "include": base_yaml_name, + "task": f"cmmlu_{args.task_prefix}_{subject_eng}" + if args.task_prefix != "" + else f"cmmlu_{subject_eng}", + "dataset_name": subject_eng, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml" + eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml new file mode 100644 index 00000000..034ce661 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "agronomy" +"description": "以下是关于农学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_agronomy" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml new file mode 100644 index 00000000..84161ec3 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "anatomy" +"description": "以下是关于解剖学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_anatomy" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml new file mode 100644 index 00000000..2099e0a1 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ancient_chinese" +"description": "以下是关于古汉语的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_ancient_chinese" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml new file mode 100644 index 00000000..6007825c --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "arts" +"description": "以下是关于艺术学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_arts" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml new file mode 100644 index 00000000..5ad9a8f2 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "astronomy" +"description": "以下是关于天文学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_astronomy" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml new file mode 100644 index 00000000..10135b60 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "business_ethics" +"description": "以下是关于商业伦理的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_business_ethics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml new file mode 100644 index 00000000..dcf6c7e6 --- /dev/null +++ 
b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_civil_service_exam" +"description": "以下是关于中国公务员考试的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_civil_service_exam" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml new file mode 100644 index 00000000..2dce17c0 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_driving_rule" +"description": "以下是关于中国驾驶规则的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_driving_rule" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml new file mode 100644 index 00000000..52400c56 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_food_culture" +"description": "以下是关于中国饮食文化的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_food_culture" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml new file mode 100644 index 00000000..bbb34a96 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_foreign_policy" +"description": "以下是关于中国外交政策的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_foreign_policy" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml new file mode 100644 index 00000000..26fe1db5 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_history" +"description": "以下是关于中国历史的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_history" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml new file mode 100644 index 00000000..4166c122 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_literature" +"description": "以下是关于中国文学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_literature" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml new file mode 100644 index 00000000..226e98a9 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_teacher_qualification" +"description": "以下是关于中国教师资格的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_teacher_qualification" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml new file mode 100644 index 00000000..6ed36425 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_knowledge" +"description": "以下是关于临床知识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_clinical_knowledge" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml new file mode 
100644 index 00000000..3543486b --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_actuarial_science" +"description": "以下是关于大学精算学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_actuarial_science" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml new file mode 100644 index 00000000..952f351c --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_education" +"description": "以下是关于大学教育学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_education" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml new file mode 100644 index 00000000..d52288a4 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_engineering_hydrology" +"description": "以下是关于大学工程水文学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_engineering_hydrology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml new file mode 100644 index 00000000..717784ac --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_law" +"description": "以下是关于大学法律的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_law" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml new file mode 100644 index 00000000..7876a584 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_mathematics" +"description": "以下是关于大学数学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_mathematics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml new file mode 100644 index 00000000..f4839bdc --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_medical_statistics" +"description": "以下是关于大学医学统计的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_medical_statistics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml new file mode 100644 index 00000000..dff0d786 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_medicine" +"description": "以下是关于大学医学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_medicine" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml new file mode 100644 index 00000000..86c874e5 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_science" +"description": "以下是关于计算机科学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_computer_science" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml new file mode 100644 index 
00000000..94715461 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_security" +"description": "以下是关于计算机安全的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_computer_security" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml new file mode 100644 index 00000000..ba952486 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "conceptual_physics" +"description": "以下是关于概念物理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_conceptual_physics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml new file mode 100644 index 00000000..86265b08 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml @@ -0,0 +1,4 @@ +"dataset_name": "construction_project_management" +"description": "以下是关于建设工程管理的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_construction_project_management" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml new file mode 100644 index 00000000..4602efb4 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "economics" +"description": "以下是关于经济学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_economics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_education.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_education.yaml new file mode 100644 index 00000000..f1dc8a8a --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_education.yaml @@ -0,0 +1,4 @@ +"dataset_name": "education" +"description": "以下是关于教育学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_education" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml new file mode 100644 index 00000000..2bb920b5 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml @@ -0,0 +1,4 @@ +"dataset_name": "electrical_engineering" +"description": "以下是关于电气工程的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_electrical_engineering" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml new file mode 100644 index 00000000..6f67be3f --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_chinese" +"description": "以下是关于小学语文的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_chinese" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml new file mode 100644 index 00000000..3017edd9 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_commonsense" +"description": "以下是关于小学常识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_commonsense" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml new file mode 100644 index 00000000..98c7d3c8 --- 
/dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_information_and_technology" +"description": "以下是关于小学信息技术的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_information_and_technology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml new file mode 100644 index 00000000..f702312c --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_mathematics" +"description": "以下是关于初等数学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_mathematics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml new file mode 100644 index 00000000..88a653a9 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ethnology" +"description": "以下是关于民族学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_ethnology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml new file mode 100644 index 00000000..9be450ca --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "food_science" +"description": "以下是关于食品科学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_food_science" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml new file mode 100644 index 00000000..be57628b --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "genetics" +"description": "以下是关于遗传学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_genetics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml new file mode 100644 index 00000000..6982be94 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "global_facts" +"description": "以下是关于全球事实的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_global_facts" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml new file mode 100644 index 00000000..a56e7974 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_biology" +"description": "以下是关于高中生物的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_biology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml new file mode 100644 index 00000000..34e99ea0 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_chemistry" +"description": "以下是关于高中化学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_chemistry" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml new file mode 100644 index 00000000..c73ebe91 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" 
+"description": "以下是关于高中地理的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_geography" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml new file mode 100644 index 00000000..3598501c --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_mathematics" +"description": "以下是关于高中数学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_mathematics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml new file mode 100644 index 00000000..676fca16 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_physics" +"description": "以下是关于高中物理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_physics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml new file mode 100644 index 00000000..5f689dff --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_politics" +"description": "以下是关于高中政治的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_politics" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml new file mode 100644 index 00000000..39ff32e7 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_sexuality" +"description": "以下是关于人类性行为的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_human_sexuality" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml new file mode 100644 index 00000000..32112d3c --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "international_law" +"description": "以下是关于国际法学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_international_law" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml new file mode 100644 index 00000000..9f283816 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml @@ -0,0 +1,4 @@ +"dataset_name": "journalism" +"description": "以下是关于新闻学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_journalism" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml new file mode 100644 index 00000000..ab40da40 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml @@ -0,0 +1,4 @@ +"dataset_name": "jurisprudence" +"description": "以下是关于法理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_jurisprudence" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml new file mode 100644 index 00000000..a5e3ee13 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml @@ -0,0 +1,4 @@ +"dataset_name": "legal_and_moral_basis" +"description": "以下是关于法律与道德基础的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": 
"cmmlu_legal_and_moral_basis" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml new file mode 100644 index 00000000..4c4ac2e1 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml @@ -0,0 +1,4 @@ +"dataset_name": "logical" +"description": "以下是关于逻辑学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_logical" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml new file mode 100644 index 00000000..062cd1cd --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml @@ -0,0 +1,4 @@ +"dataset_name": "machine_learning" +"description": "以下是关于机器学习的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_machine_learning" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml new file mode 100644 index 00000000..aa5681ba --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml @@ -0,0 +1,4 @@ +"dataset_name": "management" +"description": "以下是关于管理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_management" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml new file mode 100644 index 00000000..a729641f --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml @@ -0,0 +1,4 @@ +"dataset_name": "marketing" +"description": "以下是关于市场营销的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_marketing" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml new file mode 100644 index 00000000..f99fa175 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml @@ -0,0 +1,4 @@ +"dataset_name": "marxist_theory" +"description": "以下是关于马克思主义理论的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_marxist_theory" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml new file mode 100644 index 00000000..13b2ccc4 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml @@ -0,0 +1,4 @@ +"dataset_name": "modern_chinese" +"description": "以下是关于现代汉语的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_modern_chinese" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml new file mode 100644 index 00000000..23d52c45 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml @@ -0,0 +1,4 @@ +"dataset_name": "nutrition" +"description": "以下是关于营养学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_nutrition" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml new file mode 100644 index 00000000..17340fa4 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "philosophy" +"description": "以下是关于哲学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_philosophy" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml new file mode 100644 index 00000000..bed3485d --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_accounting" 
+"description": "以下是关于专业会计的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_professional_accounting" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml new file mode 100644 index 00000000..dec4c6cf --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_law" +"description": "以下是关于专业法学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_professional_law" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml new file mode 100644 index 00000000..92fed45e --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_medicine" +"description": "以下是关于专业医学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_professional_medicine" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml new file mode 100644 index 00000000..83f02555 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_psychology" +"description": "以下是关于专业心理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_professional_psychology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml new file mode 100644 index 00000000..a1c3711e --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml @@ -0,0 +1,4 @@ +"dataset_name": "public_relations" +"description": "以下是关于公共关系的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_public_relations" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml new file mode 100644 index 00000000..c9660f04 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml @@ -0,0 +1,4 @@ +"dataset_name": "security_study" +"description": "以下是关于安全研究的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_security_study" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml new file mode 100644 index 00000000..54f4367d --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sociology" +"description": "以下是关于社会学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_sociology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml new file mode 100644 index 00000000..35e5bb9c --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sports_science" +"description": "以下是关于体育学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_sports_science" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml new file mode 100644 index 00000000..ed4627de --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "traditional_chinese_medicine" +"description": "以下是关于中医中药的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": 
"cmmlu_traditional_chinese_medicine" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml new file mode 100644 index 00000000..1560b84f --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "virology" +"description": "以下是关于病毒学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_virology" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml new file mode 100644 index 00000000..993ce0ab --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_history" +"description": "以下是关于世界历史的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_world_history" diff --git a/lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml b/lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml new file mode 100644 index 00000000..13229fc9 --- /dev/null +++ b/lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_religions" +"description": "以下是关于世界宗教的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_world_religions" -- GitLab From b089e5f8a03d33423b02eb6565b04c2d2c4813ea Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 18:28:31 +0000 Subject: [PATCH 179/264] fix storycloze task name --- lm_eval/tasks/storycloze/storycloze_2018.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/storycloze/storycloze_2018.yaml b/lm_eval/tasks/storycloze/storycloze_2018.yaml index 0055bd5c..aa630efc 100644 --- a/lm_eval/tasks/storycloze/storycloze_2018.yaml +++ b/lm_eval/tasks/storycloze/storycloze_2018.yaml @@ -1,5 +1,5 @@ group: storycloze -task: storycloze_2016 +task: storycloze_2018 dataset_path: story_cloze dataset_name: 2018 output_type: multiple_choice -- GitLab From 09ce0c02c61581f38345c63b5e9008385b4c5af9 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 18:38:52 +0000 Subject: [PATCH 180/264] fix crows-pairs readme --- lm_eval/tasks/crows_pairs/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lm_eval/tasks/crows_pairs/README.md b/lm_eval/tasks/crows_pairs/README.md index dbb53d21..9532179d 100644 --- a/lm_eval/tasks/crows_pairs/README.md +++ b/lm_eval/tasks/crows_pairs/README.md @@ -93,10 +93,9 @@ All tasks evaluate the percentage of more-stereotypical sentences that are rated * [x] Is the task an existing benchmark in the literature? * [x] Have you referenced the original paper that introduced the task? * [x] If yes, does the original paper provide a reference implementation? - * [x] The original paper does not for causal language models, so + * [x] The original paper does not for causal language models, so this is a novel formulation of the task for autoregressive LMs. If other tasks on this dataset are already supported: * [x] Is the "Main" variant of this task clearly denoted? * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? * [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
- * [x] This matches the evaluations performed in the [Pythia paper](https://arxiv.org/abs/2304.01373) -- GitLab From b29ef52157d2d6a92fb69b86dfd02be3cf895a65 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 18:39:18 +0000 Subject: [PATCH 181/264] update cmmlu readme --- lm_eval/tasks/cmmlu/README.md | 167 +++++----------------------------- 1 file changed, 21 insertions(+), 146 deletions(-) diff --git a/lm_eval/tasks/cmmlu/README.md b/lm_eval/tasks/cmmlu/README.md index 27596ee8..7690c205 100644 --- a/lm_eval/tasks/cmmlu/README.md +++ b/lm_eval/tasks/cmmlu/README.md @@ -23,151 +23,26 @@ Homepage: https://github.com/haonan-li/CMMLU } ``` +### Groups and Tasks -| Tasks |Version|Filter| Metric |Value | |Stderr| -|--------------------------------------------|-------|------|--------------------|-----:|---|-----:| -|cmmlu |N/A |none |acc |0.2480| | | -| | |none |acc(sample agg) |0.2494| | | -| | |none |acc_norm |0.2480| | | -| | |none |acc_norm(sample agg)|0.2494| | | -|-cmmlu_modern_chinese |Yaml |none |acc |0.2500|± |0.0404| -| | |none |acc_norm |0.2500|± |0.0404| -|-cmmlu_world_history |Yaml |none |acc |0.2484|± |0.0342| -| | |none |acc_norm |0.2484|± |0.0342| -|-cmmlu_college_education |Yaml |none |acc |0.2523|± |0.0422| -| | |none |acc_norm |0.2523|± |0.0422| -|-cmmlu_international_law |Yaml |none |acc |0.2486|± |0.0319| -| | |none |acc_norm |0.2486|± |0.0319| -|-cmmlu_philosophy |Yaml |none |acc |0.1905|± |0.0385| -| | |none |acc_norm |0.1905|± |0.0385| -|-cmmlu_professional_psychology |Yaml |none |acc |0.2457|± |0.0283| -| | |none |acc_norm |0.2457|± |0.0283| -|-cmmlu_college_engineering_hydrology |Yaml |none |acc |0.2830|± |0.0440| -| | |none |acc_norm |0.2830|± |0.0440| -|-cmmlu_electrical_engineering |Yaml |none |acc |0.2442|± |0.0329| -| | |none |acc_norm |0.2442|± |0.0329| -|-cmmlu_ancient_chinese |Yaml |none |acc |0.2378|± |0.0333| -| | |none |acc_norm |0.2378|± |0.0333| -|-cmmlu_chinese_food_culture |Yaml |none |acc |0.2353|± |0.0365| -| | |none |acc_norm |0.2353|± |0.0365| -|-cmmlu_chinese_literature |Yaml |none |acc |0.2598|± |0.0308| -| | |none |acc_norm |0.2598|± |0.0308| -|-cmmlu_legal_and_moral_basis |Yaml |none |acc |0.2477|± |0.0296| -| | |none |acc_norm |0.2477|± |0.0296| -|-cmmlu_construction_project_management |Yaml |none |acc |0.2374|± |0.0362| -| | |none |acc_norm |0.2374|± |0.0362| -|-cmmlu_ethnology |Yaml |none |acc |0.2519|± |0.0375| -| | |none |acc_norm |0.2519|± |0.0375| -|-cmmlu_high_school_geography |Yaml |none |acc |0.2542|± |0.0403| -| | |none |acc_norm |0.2542|± |0.0403| -|-cmmlu_professional_medicine |Yaml |none |acc |0.2500|± |0.0224| -| | |none |acc_norm |0.2500|± |0.0224| -|-cmmlu_global_facts |Yaml |none |acc |0.2349|± |0.0348| -| | |none |acc_norm |0.2349|± |0.0348| -|-cmmlu_astronomy |Yaml |none |acc |0.2303|± |0.0329| -| | |none |acc_norm |0.2303|± |0.0329| -|-cmmlu_machine_learning |Yaml |none |acc |0.2541|± |0.0396| -| | |none |acc_norm |0.2541|± |0.0396| -|-cmmlu_high_school_politics |Yaml |none |acc |0.2378|± |0.0357| -| | |none |acc_norm |0.2378|± |0.0357| -|-cmmlu_chinese_civil_service_exam |Yaml |none |acc |0.2562|± |0.0346| -| | |none |acc_norm |0.2562|± |0.0346| -|-cmmlu_professional_law |Yaml |none |acc |0.2512|± |0.0299| -| | |none |acc_norm |0.2512|± |0.0299| -|-cmmlu_college_medical_statistics |Yaml |none |acc |0.2453|± |0.0420| -| | |none |acc_norm |0.2453|± |0.0420| -|-cmmlu_computer_security |Yaml |none |acc |0.2573|± |0.0335| -| | |none |acc_norm |0.2573|± |0.0335| -|-cmmlu_food_science |Yaml |none |acc |0.2238|± 
|0.0350| -| | |none |acc_norm |0.2238|± |0.0350| -|-cmmlu_security_study |Yaml |none |acc |0.2519|± |0.0375| -| | |none |acc_norm |0.2519|± |0.0375| -|-cmmlu_high_school_physics |Yaml |none |acc |0.2545|± |0.0417| -| | |none |acc_norm |0.2545|± |0.0417| -|-cmmlu_management |Yaml |none |acc |0.2476|± |0.0299| -| | |none |acc_norm |0.2476|± |0.0299| -|-cmmlu_professional_accounting |Yaml |none |acc |0.2514|± |0.0329| -| | |none |acc_norm |0.2514|± |0.0329| -|-cmmlu_human_sexuality |Yaml |none |acc |0.2222|± |0.0372| -| | |none |acc_norm |0.2222|± |0.0372| -|-cmmlu_marxist_theory |Yaml |none |acc |0.2487|± |0.0315| -| | |none |acc_norm |0.2487|± |0.0315| -|-cmmlu_agronomy |Yaml |none |acc |0.2426|± |0.0331| -| | |none |acc_norm |0.2426|± |0.0331| -|-cmmlu_chinese_teacher_qualification |Yaml |none |acc |0.2626|± |0.0330| -| | |none |acc_norm |0.2626|± |0.0330| -|-cmmlu_genetics |Yaml |none |acc |0.2273|± |0.0317| -| | |none |acc_norm |0.2273|± |0.0317| -|-cmmlu_sports_science |Yaml |none |acc |0.2727|± |0.0348| -| | |none |acc_norm |0.2727|± |0.0348| -|-cmmlu_elementary_commonsense |Yaml |none |acc |0.2424|± |0.0305| -| | |none |acc_norm |0.2424|± |0.0305| -|-cmmlu_logical |Yaml |none |acc |0.1951|± |0.0359| -| | |none |acc_norm |0.1951|± |0.0359| -|-cmmlu_chinese_history |Yaml |none |acc |0.2508|± |0.0242| -| | |none |acc_norm |0.2508|± |0.0242| -|-cmmlu_traditional_chinese_medicine |Yaml |none |acc |0.2378|± |0.0314| -| | |none |acc_norm |0.2378|± |0.0314| -|-cmmlu_elementary_mathematics |Yaml |none |acc |0.2609|± |0.0290| -| | |none |acc_norm |0.2609|± |0.0290| -|-cmmlu_nutrition |Yaml |none |acc |0.2552|± |0.0363| -| | |none |acc_norm |0.2552|± |0.0363| -|-cmmlu_chinese_foreign_policy |Yaml |none |acc |0.1776|± |0.0371| -| | |none |acc_norm |0.1776|± |0.0371| -|-cmmlu_journalism |Yaml |none |acc |0.2616|± |0.0336| -| | |none |acc_norm |0.2616|± |0.0336| -|-cmmlu_jurisprudence |Yaml |none |acc |0.2506|± |0.0214| -| | |none |acc_norm |0.2506|± |0.0214| -|-cmmlu_sociology |Yaml |none |acc |0.2478|± |0.0288| -| | |none |acc_norm |0.2478|± |0.0288| -|-cmmlu_college_mathematics |Yaml |none |acc |0.2190|± |0.0406| -| | |none |acc_norm |0.2190|± |0.0406| -|-cmmlu_computer_science |Yaml |none |acc |0.2549|± |0.0306| -| | |none |acc_norm |0.2549|± |0.0306| -|-cmmlu_conceptual_physics |Yaml |none |acc |0.2517|± |0.0359| -| | |none |acc_norm |0.2517|± |0.0359| -|-cmmlu_elementary_chinese |Yaml |none |acc |0.2817|± |0.0284| -| | |none |acc_norm |0.2817|± |0.0284| -|-cmmlu_marketing |Yaml |none |acc |0.2500|± |0.0324| -| | |none |acc_norm |0.2500|± |0.0324| -|-cmmlu_high_school_chemistry |Yaml |none |acc |0.2576|± |0.0382| -| | |none |acc_norm |0.2576|± |0.0382| -|-cmmlu_college_law |Yaml |none |acc |0.2315|± |0.0408| -| | |none |acc_norm |0.2315|± |0.0408| -|-cmmlu_chinese_driving_rule |Yaml |none |acc |0.2595|± |0.0384| -| | |none |acc_norm |0.2595|± |0.0384| -|-cmmlu_clinical_knowledge |Yaml |none |acc |0.2532|± |0.0283| -| | |none |acc_norm |0.2532|± |0.0283| -|-cmmlu_education |Yaml |none |acc |0.2761|± |0.0351| -| | |none |acc_norm |0.2761|± |0.0351| -|-cmmlu_high_school_mathematics |Yaml |none |acc |0.2927|± |0.0356| -| | |none |acc_norm |0.2927|± |0.0356| -|-cmmlu_college_actuarial_science |Yaml |none |acc |0.2736|± |0.0435| -| | |none |acc_norm |0.2736|± |0.0435| -|-cmmlu_arts |Yaml |none |acc |0.2313|± |0.0334| -| | |none |acc_norm |0.2313|± |0.0334| -|-cmmlu_public_relations |Yaml |none |acc |0.2471|± |0.0328| -| | |none |acc_norm |0.2471|± |0.0328| -|-cmmlu_college_medicine |Yaml |none |acc 
|0.2418|± |0.0260| -| | |none |acc_norm |0.2418|± |0.0260| -|-cmmlu_economics |Yaml |none |acc |0.2453|± |0.0342| -| | |none |acc_norm |0.2453|± |0.0342| -|-cmmlu_elementary_information_and_technology|Yaml |none |acc |0.2731|± |0.0289| -| | |none |acc_norm |0.2731|± |0.0289| -|-cmmlu_anatomy |Yaml |none |acc |0.2432|± |0.0354| -| | |none |acc_norm |0.2432|± |0.0354| -|-cmmlu_world_religions |Yaml |none |acc |0.2875|± |0.0359| -| | |none |acc_norm |0.2875|± |0.0359| -|-cmmlu_virology |Yaml |none |acc |0.2485|± |0.0333| -| | |none |acc_norm |0.2485|± |0.0333| -|-cmmlu_high_school_biology |Yaml |none |acc |0.2485|± |0.0333| -| | |none |acc_norm |0.2485|± |0.0333| -|-cmmlu_business_ethics |Yaml |none |acc |0.2584|± |0.0304| -| | |none |acc_norm |0.2584|± |0.0304| +#### Groups -|Groups|Version|Filter| Metric |Value | |Stderr| -|------|-------|------|--------------------|-----:|---|------| -|cmmlu |N/A |none |acc |0.2480| | | -| | |none |acc(sample agg) |0.2494| | | -| | |none |acc_norm |0.2480| | | -| | |none |acc_norm(sample agg)|0.2494| | | +- `cmmlu`: All 67 subjects of the CMMLU dataset, evaluated following the methodology in MMLU's original implementation. + +#### Tasks + + +The following tasks evaluate subjects in the CMMLU dataset using loglikelihood-based multiple-choice scoring: +- `cmmlu_{subject_english}` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? + * [x] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? -- GitLab From 7c2687cb31abb2ac1ea7f81673eb8e8fba4d30a9 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 18:46:04 +0000 Subject: [PATCH 182/264] update typehints in decontam tool --- lm_eval/decontamination/janitor.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lm_eval/decontamination/janitor.py b/lm_eval/decontamination/janitor.py index 209fa8ef..5ad84d13 100644 --- a/lm_eval/decontamination/janitor.py +++ b/lm_eval/decontamination/janitor.py @@ -3,7 +3,7 @@ import string import pickle import traceback from pprint import pprint -from typing import Iterator, Sequence, TypeVar +from typing import Iterator, Sequence, TypeVar, List, Tuple # This is a cpp module. 
Compile janitor_util.cpp with: # c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) janitor_util.cpp -o janitor_util$(python3-config --extension-suffix) -undefined dynamic_lookup @@ -21,7 +21,7 @@ T = TypeVar("T") # Implementation from nltk source # https://www.nltk.org/_modules/nltk/util.html -def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[tuple[T, ...]]: +def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[Tuple[T, ...]]: history = [] while n > 1: # PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator @@ -70,14 +70,14 @@ def word_ngrams(s: str, n: int) -> Iterator[str]: # https://stackoverflow.com/questions/13734451/string-split-with-indices-in-python -def split_indices(s: str) -> Iterator[tuple[str, tuple[int, int]]]: +def split_indices(s: str) -> Iterator[Tuple[str, Tuple[int, int]]]: """Splits a string on whitespaces and records the indices of each in the original string. @:return generator((word, (start_idx, end_idx)), ...) """ return ((m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r"\S+", s)) -def word_ngrams_indices(s: str, n: int) -> Iterator[tuple[str, tuple[int, int]]]: +def word_ngrams_indices(s: str, n: int) -> Iterator[Tuple[str, Tuple[int, int]]]: """Splits a string into pairs of (ngram words, their start/end indices)""" tokens_with_indices = split_indices(s) @@ -157,7 +157,7 @@ class Janitor: print("WARNING: Janitor running in python mode") return self.register_contaminant_python(dirt_string) - def clean(self, dirty_string: str) -> list[str]: + def clean(self, dirty_string: str) -> List[str]: """Clean a string (e.g. a training set) by removing all ngrams previously registered as contaminants. Returns a list of clean chunks, or empty if the string was too dirty""" @@ -168,8 +168,8 @@ class Janitor: return self.clean_python(dirty_string) def _split_chunks( - self, dirty_string: str, dirty_parts: Sequence[tuple] - ) -> list[str]: + self, dirty_string: str, dirty_parts: Sequence[Tuple] + ) -> List[str]: clean_chunks = [] splice_idx = 0 end = -1 @@ -197,7 +197,7 @@ class Janitor: janitor_util.clean_ngram(dirt_string, self.delete_chars, self.ngram_n) ) - def clean_cpp(self, dirty_string: str) -> list[str]: + def clean_cpp(self, dirty_string: str) -> List[str]: contamination_indices = janitor_util.clean_ngram_with_indices( dirty_string, self.delete_chars, self.ngram_n ) @@ -215,7 +215,7 @@ class Janitor: word_ngrams(self.normalize_string(dirt_string), self.ngram_n) ) - def clean_python(self, dirty_string: str) -> list[str]: + def clean_python(self, dirty_string: str) -> List[str]: contamination_indices = ( (None, *idx_pair) for dirty_ngram, idx_pair in word_ngrams_indices(dirty_string, self.ngram_n) -- GitLab From 4ff8260dfbbe77d887ceeb11fde45bb9b141022c Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 18:51:53 +0000 Subject: [PATCH 183/264] allow for trailing comma in model_args --- lm_eval/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index 6cf97331..1452742d 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -54,7 +54,7 @@ def simple_parse_args_string(args_string): args_string = args_string.strip() if not args_string: return {} - arg_list = args_string.split(",") + arg_list = [arg for arg in args_string.split(",") if arg] args_dict = {k: v for k, v in [arg.split("=") for arg in arg_list]} return args_dict -- GitLab From 020c41156dce45f27017cc9cb6b0226e1373b055 Mon Sep 17 00:00:00 2001 From: 
haileyschoelkopf Date: Fri, 15 Sep 2023 19:25:58 +0000 Subject: [PATCH 184/264] port csatqa to yaml --- lm_eval/tasks/csatqa/_default_csatqa_yaml | 15 +++++++ lm_eval/tasks/csatqa/_generate_configs.py | 51 +++++++++++++++++++++++ lm_eval/tasks/csatqa/csatqa_gr.yaml | 3 ++ lm_eval/tasks/csatqa/csatqa_li.yaml | 3 ++ lm_eval/tasks/csatqa/csatqa_rch.yaml | 3 ++ lm_eval/tasks/csatqa/csatqa_rcs.yaml | 3 ++ lm_eval/tasks/csatqa/csatqa_rcss.yaml | 3 ++ lm_eval/tasks/csatqa/csatqa_wr.yaml | 3 ++ lm_eval/tasks/csatqa/utils.py | 20 +++++++++ 9 files changed, 104 insertions(+) create mode 100644 lm_eval/tasks/csatqa/_default_csatqa_yaml create mode 100644 lm_eval/tasks/csatqa/_generate_configs.py create mode 100644 lm_eval/tasks/csatqa/csatqa_gr.yaml create mode 100644 lm_eval/tasks/csatqa/csatqa_li.yaml create mode 100644 lm_eval/tasks/csatqa/csatqa_rch.yaml create mode 100644 lm_eval/tasks/csatqa/csatqa_rcs.yaml create mode 100644 lm_eval/tasks/csatqa/csatqa_rcss.yaml create mode 100644 lm_eval/tasks/csatqa/csatqa_wr.yaml create mode 100644 lm_eval/tasks/csatqa/utils.py diff --git a/lm_eval/tasks/csatqa/_default_csatqa_yaml b/lm_eval/tasks/csatqa/_default_csatqa_yaml new file mode 100644 index 00000000..11b93625 --- /dev/null +++ b/lm_eval/tasks/csatqa/_default_csatqa_yaml @@ -0,0 +1,15 @@ +group: csatqa +dataset_path: EleutherAI/csatqa +test_split: test +output_type: multiple_choice +process_docs: !function utils.process_docs +doc_to_text: "{{question}}" +doc_to_choice: "{{choices}}" +doc_to_target: "{{gold}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/csatqa/_generate_configs.py b/lm_eval/tasks/csatqa/_generate_configs.py new file mode 100644 index 00000000..ca2bfc43 --- /dev/null +++ b/lm_eval/tasks/csatqa/_generate_configs.py @@ -0,0 +1,51 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" +import os +import yaml +import argparse + +from tqdm import tqdm + +from lm_eval.logger import eval_logger + +SUBSETS = ["WR", "GR", "RCS", "RCSS", "RCH", "LI"] + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="csatqa") + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. 
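+    # e.g. passing --base_yaml_path lm_eval/tasks/csatqa/_default_csatqa_yaml
+    # makes base_yaml_name "_default_csatqa_yaml", which is the value each
+    # generated subset YAML references through its "include" field.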
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path) as f: + base_yaml = yaml.full_load(f) + + for name in tqdm(SUBSETS): + + yaml_dict = { + "include": base_yaml_name, + "task": f"csatqa_{args.task_prefix}_{name}" + if args.task_prefix != "" + else f"csatqa_{name.lower()}", + "dataset_name": name, + } + + file_save_path = args.save_prefix_path + f"_{name.lower()}.yaml" + eval_logger.info(f"Saving yaml for subset {name} to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm_eval/tasks/csatqa/csatqa_gr.yaml b/lm_eval/tasks/csatqa/csatqa_gr.yaml new file mode 100644 index 00000000..856ba617 --- /dev/null +++ b/lm_eval/tasks/csatqa/csatqa_gr.yaml @@ -0,0 +1,3 @@ +"dataset_name": "GR" +"include": "_default_csatqa_yaml" +"task": "csatqa_gr" diff --git a/lm_eval/tasks/csatqa/csatqa_li.yaml b/lm_eval/tasks/csatqa/csatqa_li.yaml new file mode 100644 index 00000000..eb7e1783 --- /dev/null +++ b/lm_eval/tasks/csatqa/csatqa_li.yaml @@ -0,0 +1,3 @@ +"dataset_name": "LI" +"include": "_default_csatqa_yaml" +"task": "csatqa_li" diff --git a/lm_eval/tasks/csatqa/csatqa_rch.yaml b/lm_eval/tasks/csatqa/csatqa_rch.yaml new file mode 100644 index 00000000..4eaffaa9 --- /dev/null +++ b/lm_eval/tasks/csatqa/csatqa_rch.yaml @@ -0,0 +1,3 @@ +"dataset_name": "RCH" +"include": "_default_csatqa_yaml" +"task": "csatqa_rch" diff --git a/lm_eval/tasks/csatqa/csatqa_rcs.yaml b/lm_eval/tasks/csatqa/csatqa_rcs.yaml new file mode 100644 index 00000000..e2efcc17 --- /dev/null +++ b/lm_eval/tasks/csatqa/csatqa_rcs.yaml @@ -0,0 +1,3 @@ +"dataset_name": "RCS" +"include": "_default_csatqa_yaml" +"task": "csatqa_rcs" diff --git a/lm_eval/tasks/csatqa/csatqa_rcss.yaml b/lm_eval/tasks/csatqa/csatqa_rcss.yaml new file mode 100644 index 00000000..7b099fe2 --- /dev/null +++ b/lm_eval/tasks/csatqa/csatqa_rcss.yaml @@ -0,0 +1,3 @@ +"dataset_name": "RCSS" +"include": "_default_csatqa_yaml" +"task": "csatqa_rcss" diff --git a/lm_eval/tasks/csatqa/csatqa_wr.yaml b/lm_eval/tasks/csatqa/csatqa_wr.yaml new file mode 100644 index 00000000..6276dfef --- /dev/null +++ b/lm_eval/tasks/csatqa/csatqa_wr.yaml @@ -0,0 +1,3 @@ +"dataset_name": "WR" +"include": "_default_csatqa_yaml" +"task": "csatqa_wr" diff --git a/lm_eval/tasks/csatqa/utils.py b/lm_eval/tasks/csatqa/utils.py new file mode 100644 index 00000000..253bc1b6 --- /dev/null +++ b/lm_eval/tasks/csatqa/utils.py @@ -0,0 +1,20 @@ +import datasets + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + instruction = f"""다음을 읽고 정답으로 알맞은 것을 고르시요. 
+### Context: {doc["context"]} +### Question: {doc["question"]} +### Options: +(1) {doc['option#1']}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc['option#4']}\n(5) {doc['option#5']} +### Answer: 주어진 문제의 정답은""" + + out_doc = { + "question": instruction, + "choices": ["(1)", "(2)", "(3)", "(4)", "(5)"], + "gold": int(doc["gold"]) - 1, + } + return out_doc + + return dataset.map(_process_doc) -- GitLab From 2d5d94da064a241a6c3813276b5faabaa73126fb Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 15 Sep 2023 20:27:28 +0000 Subject: [PATCH 185/264] add ceval --- lm_eval/tasks/ceval/README.md | 127 ++++++++++++++++++ lm_eval/tasks/ceval/_default_ceval_yaml | 19 +++ lm_eval/tasks/ceval/_generate_configs.py | 116 ++++++++++++++++ .../tasks/ceval/ceval-valid_accountant.yaml | 4 + .../ceval-valid_advanced_mathematics.yaml | 4 + .../tasks/ceval/ceval-valid_art_studies.yaml | 4 + .../ceval/ceval-valid_basic_medicine.yaml | 4 + .../ceval-valid_business_administration.yaml | 4 + ...valid_chinese_language_and_literature.yaml | 4 + .../ceval/ceval-valid_civil_servant.yaml | 4 + .../ceval/ceval-valid_clinical_medicine.yaml | 4 + .../ceval/ceval-valid_college_chemistry.yaml | 4 + .../ceval/ceval-valid_college_economics.yaml | 4 + .../ceval/ceval-valid_college_physics.yaml | 4 + .../ceval-valid_college_programming.yaml | 4 + .../ceval-valid_computer_architecture.yaml | 4 + .../ceval/ceval-valid_computer_network.yaml | 4 + .../ceval-valid_discrete_mathematics.yaml | 4 + .../ceval/ceval-valid_education_science.yaml | 4 + .../ceval-valid_electrical_engineer.yaml | 4 + ...ironmental_impact_assessment_engineer.yaml | 4 + .../ceval/ceval-valid_fire_engineer.yaml | 4 + .../ceval-valid_high_school_biology.yaml | 4 + .../ceval-valid_high_school_chemistry.yaml | 4 + .../ceval-valid_high_school_chinese.yaml | 4 + .../ceval-valid_high_school_geography.yaml | 4 + .../ceval-valid_high_school_history.yaml | 4 + .../ceval-valid_high_school_mathematics.yaml | 4 + .../ceval-valid_high_school_physics.yaml | 4 + .../ceval-valid_high_school_politics.yaml | 4 + ...lid_ideological_and_moral_cultivation.yaml | 4 + lm_eval/tasks/ceval/ceval-valid_law.yaml | 4 + .../ceval/ceval-valid_legal_professional.yaml | 4 + lm_eval/tasks/ceval/ceval-valid_logic.yaml | 4 + .../ceval/ceval-valid_mao_zedong_thought.yaml | 4 + lm_eval/tasks/ceval/ceval-valid_marxism.yaml | 4 + .../ceval/ceval-valid_metrology_engineer.yaml | 4 + .../ceval-valid_middle_school_biology.yaml | 4 + .../ceval-valid_middle_school_chemistry.yaml | 4 + .../ceval-valid_middle_school_geography.yaml | 4 + .../ceval-valid_middle_school_history.yaml | 4 + ...ceval-valid_middle_school_mathematics.yaml | 4 + .../ceval-valid_middle_school_physics.yaml | 4 + .../ceval-valid_middle_school_politics.yaml | 4 + .../ceval-valid_modern_chinese_history.yaml | 4 + .../ceval/ceval-valid_operating_system.yaml | 4 + .../tasks/ceval/ceval-valid_physician.yaml | 4 + .../ceval/ceval-valid_plant_protection.yaml | 4 + ...eval-valid_probability_and_statistics.yaml | 4 + .../ceval-valid_professional_tour_guide.yaml | 4 + .../ceval/ceval-valid_sports_science.yaml | 4 + .../ceval/ceval-valid_tax_accountant.yaml | 4 + .../ceval-valid_teacher_qualification.yaml | 4 + .../ceval-valid_urban_and_rural_planner.yaml | 4 + .../ceval-valid_veterinary_medicine.yaml | 4 + 55 files changed, 470 insertions(+) create mode 100644 lm_eval/tasks/ceval/README.md create mode 100644 lm_eval/tasks/ceval/_default_ceval_yaml create mode 100644 lm_eval/tasks/ceval/_generate_configs.py create mode 100644 
lm_eval/tasks/ceval/ceval-valid_accountant.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_art_studies.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_business_administration.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_college_economics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_college_physics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_college_programming.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_computer_network.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_education_science.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_law.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_logic.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_marxism.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_operating_system.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_physician.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml create 
mode 100644 lm_eval/tasks/ceval/ceval-valid_sports_science.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml create mode 100644 lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml diff --git a/lm_eval/tasks/ceval/README.md b/lm_eval/tasks/ceval/README.md new file mode 100644 index 00000000..091b8bb6 --- /dev/null +++ b/lm_eval/tasks/ceval/README.md @@ -0,0 +1,127 @@ +# C-Eval (Validation) + +### Paper +C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models +https://arxiv.org/pdf/2305.08322.pdf + +C-Eval is a comprehensive Chinese evaluation suite for foundation models. +It consists of 13948 multi-choice questions spanning 52 diverse disciplines +and four difficulty levels. + +Homepage: https://cevalbenchmark.com/ + +### Citation + +```bibtex +@article{huang2023ceval, + title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models}, + author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian}, + journal={arXiv preprint arXiv:2305.08322}, + year={2023} +} +``` + +### Groups and Tasks + +#### Groups + +- `ceval-valid`: All 52 subjects of the C-Eval dataset, evaluated following the methodology in MMLU's original implementation. This implementation consists solely of the validation set of C-Eval, as the test set requires submission of model predictions to an external site. + +#### Tasks + + +The following tasks evaluate subjects in the C-Eval dataset using loglikelihood-based multiple-choice scoring: +- `ceval-valid_{subject_english}`
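+
+As a quick usage sketch (illustrative only; the "hf" backend name, the example model, and the exact keyword arguments are assumptions based on how `simple_evaluate` is used elsewhere in this branch, not a fixed part of the task definition):
+
+```python
+from lm_eval import evaluator
+
+# Minimal sketch: score a small HuggingFace causal LM on the C-Eval
+# validation split, with 5 few-shot examples drawn from the dev split.
+# The pretrained model name below is only a placeholder.
+results = evaluator.simple_evaluate(
+    model="hf",
+    model_args="pretrained=EleutherAI/pythia-160m",
+    tasks=["ceval-valid"],
+    num_fewshot=5,
+)
+print(results["results"])
+```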
+ +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/ceval/_default_ceval_yaml b/lm_eval/tasks/ceval/_default_ceval_yaml new file mode 100644 index 00000000..79f92cdd --- /dev/null +++ b/lm_eval/tasks/ceval/_default_ceval_yaml @@ -0,0 +1,19 @@ +group: ceval-valid +dataset_path: ceval/ceval-exam +validation_split: val +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD.
{{D}}\n答案:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: "1.0" diff --git a/lm_eval/tasks/ceval/_generate_configs.py b/lm_eval/tasks/ceval/_generate_configs.py new file mode 100644 index 00000000..deaa0372 --- /dev/null +++ b/lm_eval/tasks/ceval/_generate_configs.py @@ -0,0 +1,116 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" +import os +import yaml +import argparse + +from tqdm import tqdm + +from lm_eval.logger import eval_logger + +SUBJECTS = { + "computer_network": "计算机网络", + "operating_system": "操作系统", + "computer_architecture": "计算机组成", + "college_programming": "大学编程", + "college_physics": "大学物理", + "college_chemistry": "大学化学", + "advanced_mathematics": "高等数学", + "probability_and_statistics": "概率统计", + "discrete_mathematics": "离散数学", + "electrical_engineer": "注册电气工程师", + "metrology_engineer": "注册计量师", + "high_school_mathematics": "高中数学", + "high_school_physics": "高中物理", + "high_school_chemistry": "高中化学", + "high_school_biology": "高中生物", + "middle_school_mathematics": "初中数学", + "middle_school_biology": "初中生物", + "middle_school_physics": "初中物理", + "middle_school_chemistry": "初中化学", + "veterinary_medicine": "兽医学", + "college_economics": "大学经济学", + "business_administration": "工商管理", + "marxism": "马克思主义基本原理", + "mao_zedong_thought": "毛泽东思想和中国特色社会主义理论体系概论", + "education_science": "教育学", + "teacher_qualification": "教师资格", + "high_school_politics": "高中政治", + "high_school_geography": "高中地理", + "middle_school_politics": "初中政治", + "middle_school_geography": "初中地理", + "modern_chinese_history": "近代史纲要", + "ideological_and_moral_cultivation": "思想道德修养与法律基础", + "logic": "逻辑学", + "law": "法学", + "chinese_language_and_literature": "中国语言文学", + "art_studies": "艺术学", + "professional_tour_guide": "导游资格", + "legal_professional": "法律职业资格", + "high_school_chinese": "高中语文", + "high_school_history": "高中历史", + "middle_school_history": "初中历史", + "civil_servant": "公务员", + "sports_science": "体育学", + "plant_protection": "植物保护", + "basic_medicine": "基础医学", + "clinical_medicine": "临床医学", + "urban_and_rural_planner": "注册城乡规划师", + "accountant": "注册会计师", + "fire_engineer": "注册消防工程师", + "environmental_impact_assessment_engineer": "环境影响评价工程师", + "tax_accountant": "税务师", + "physician": "医师资格", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="ceval-valid") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. 
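+    # When --cot_prompt_path is given, the file is loaded as JSON below and is
+    # expected to map each English subject name (e.g. "computer_network") to a
+    # full description string, which replaces the default Chinese instruction.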
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path) as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path) as f: + cot_file = json.load(f) + + for subject_eng, subject_zh in tqdm(SUBJECTS.items()): + if args.cot_prompt_path is not None: + description = cot_file[subject_eng] + else: + description = f"以下是中国关于{subject_zh}的单项选择题,请选出其中的正确答案。\n\n" + + yaml_dict = { + "include": base_yaml_name, + "task": f"ceval-valid_{args.task_prefix}_{subject_eng}" + if args.task_prefix != "" + else f"ceval-valid_{subject_eng}", + "dataset_name": subject_eng, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml" + eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm_eval/tasks/ceval/ceval-valid_accountant.yaml b/lm_eval/tasks/ceval/ceval-valid_accountant.yaml new file mode 100644 index 00000000..04f669ed --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_accountant.yaml @@ -0,0 +1,4 @@ +"dataset_name": "accountant" +"description": "以下是中国关于注册会计师的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_accountant" diff --git a/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml b/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml new file mode 100644 index 00000000..9778347b --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "advanced_mathematics" +"description": "以下是中国关于高等数学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_advanced_mathematics" diff --git a/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml b/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml new file mode 100644 index 00000000..62722626 --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "art_studies" +"description": "以下是中国关于艺术学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_art_studies" diff --git a/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml b/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml new file mode 100644 index 00000000..907bf8eb --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "basic_medicine" +"description": "以下是中国关于基础医学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_basic_medicine" diff --git a/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml b/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml new file mode 100644 index 00000000..b1f96f33 --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml @@ -0,0 +1,4 @@ +"dataset_name": "business_administration" +"description": "以下是中国关于工商管理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_business_administration" diff --git a/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml b/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml new file mode 100644 index 00000000..e285b59d --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_language_and_literature" +"description": "以下是中国关于中国语言文学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": 
"ceval-valid_chinese_language_and_literature" diff --git a/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml b/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml new file mode 100644 index 00000000..0aad21b9 --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml @@ -0,0 +1,4 @@ +"dataset_name": "civil_servant" +"description": "以下是中国关于公务员的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_civil_servant" diff --git a/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml b/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml new file mode 100644 index 00000000..97c08d06 --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_medicine" +"description": "以下是中国关于临床医学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_clinical_medicine" diff --git a/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml b/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml new file mode 100644 index 00000000..9ba89714 --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_chemistry" +"description": "以下是中国关于大学化学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_chemistry" diff --git a/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml b/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml new file mode 100644 index 00000000..10b89f8f --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_economics" +"description": "以下是中国关于大学经济学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_economics" diff --git a/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml b/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml new file mode 100644 index 00000000..50e7f01c --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_physics" +"description": "以下是中国关于大学物理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_physics" diff --git a/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml b/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml new file mode 100644 index 00000000..4cfe029a --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_programming" +"description": "以下是中国关于大学编程的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_programming" diff --git a/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml b/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml new file mode 100644 index 00000000..d17454a7 --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_architecture" +"description": "以下是中国关于计算机组成的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_computer_architecture" diff --git a/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml b/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml new file mode 100644 index 00000000..9391dbbd --- /dev/null +++ b/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_network" +"description": "以下是中国关于计算机网络的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_computer_network" diff --git a/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml 
diff --git a/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml b/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml
new file mode 100644
index 00000000..f2bd4204
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "discrete_mathematics"
+"description": "以下是中国关于离散数学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_discrete_mathematics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_education_science.yaml b/lm_eval/tasks/ceval/ceval-valid_education_science.yaml
new file mode 100644
index 00000000..985edf98
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_education_science.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "education_science"
+"description": "以下是中国关于教育学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_education_science"
diff --git a/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml b/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml
new file mode 100644
index 00000000..cc946b99
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "electrical_engineer"
+"description": "以下是中国关于注册电气工程师的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_electrical_engineer"
diff --git a/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml b/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml
new file mode 100644
index 00000000..d6ed06fd
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "environmental_impact_assessment_engineer"
+"description": "以下是中国关于环境影响评价工程师的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_environmental_impact_assessment_engineer"
diff --git a/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml b/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml
new file mode 100644
index 00000000..ddb6fa77
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "fire_engineer"
+"description": "以下是中国关于注册消防工程师的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_fire_engineer"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml
new file mode 100644
index 00000000..26cbc8b5
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_biology"
+"description": "以下是中国关于高中生物的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_biology"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml
new file mode 100644
index 00000000..8e140af2
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_chemistry"
+"description": "以下是中国关于高中化学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_chemistry"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml
new file mode 100644
index 00000000..0a2befa8
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_chinese"
+"description": "以下是中国关于高中语文的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_chinese"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml
new file mode 100644
index 00000000..a2bb10ca
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_geography"
+"description": "以下是中国关于高中地理的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_geography"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml
new file mode 100644
index 00000000..9335bc0f
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_history"
+"description": "以下是中国关于高中历史的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_history"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml
new file mode 100644
index 00000000..abc00afa
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_mathematics"
+"description": "以下是中国关于高中数学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_mathematics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml
new file mode 100644
index 00000000..e3aa084a
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_physics"
+"description": "以下是中国关于高中物理的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_physics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml b/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml
new file mode 100644
index 00000000..7f77391b
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_politics"
+"description": "以下是中国关于高中政治的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_high_school_politics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml b/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml
new file mode 100644
index 00000000..33e341eb
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "ideological_and_moral_cultivation"
+"description": "以下是中国关于思想道德修养与法律基础的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_ideological_and_moral_cultivation"
diff --git a/lm_eval/tasks/ceval/ceval-valid_law.yaml b/lm_eval/tasks/ceval/ceval-valid_law.yaml
new file mode 100644
index 00000000..921709ce
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_law.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "law"
+"description": "以下是中国关于法学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_law"
diff --git a/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml b/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml
new file mode 100644
index 00000000..897ed74f
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "legal_professional"
+"description": "以下是中国关于法律职业资格的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_legal_professional"
diff --git a/lm_eval/tasks/ceval/ceval-valid_logic.yaml b/lm_eval/tasks/ceval/ceval-valid_logic.yaml
new file mode 100644
index 00000000..12912daf
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_logic.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "logic"
+"description": "以下是中国关于逻辑学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_logic"
diff --git a/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml b/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml
new file mode 100644
index 00000000..0899b735
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "mao_zedong_thought"
+"description": "以下是中国关于毛泽东思想和中国特色社会主义理论体系概论的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_mao_zedong_thought"
diff --git a/lm_eval/tasks/ceval/ceval-valid_marxism.yaml b/lm_eval/tasks/ceval/ceval-valid_marxism.yaml
new file mode 100644
index 00000000..bfd3d5db
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_marxism.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "marxism"
+"description": "以下是中国关于马克思主义基本原理的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_marxism"
diff --git a/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml b/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml
new file mode 100644
index 00000000..d312ceff
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "metrology_engineer"
+"description": "以下是中国关于注册计量师的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_metrology_engineer"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml
new file mode 100644
index 00000000..6f872535
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_biology"
+"description": "以下是中国关于初中生物的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_biology"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml
new file mode 100644
index 00000000..28708b5b
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_chemistry"
+"description": "以下是中国关于初中化学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_chemistry"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml
new file mode 100644
index 00000000..8d179a2f
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_geography"
+"description": "以下是中国关于初中地理的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_geography"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml
new file mode 100644
index 00000000..b61a8ee8
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_history"
+"description": "以下是中国关于初中历史的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_history"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml
new file mode 100644
index 00000000..698103d4
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_mathematics"
+"description": "以下是中国关于初中数学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_mathematics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml
new file mode 100644
index 00000000..dbe69686
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_physics"
+"description": "以下是中国关于初中物理的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_physics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml b/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml
new file mode 100644
index 00000000..0cf20c29
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "middle_school_politics"
+"description": "以下是中国关于初中政治的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_middle_school_politics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml b/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml
new file mode 100644
index 00000000..b1621075
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "modern_chinese_history"
+"description": "以下是中国关于近代史纲要的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_modern_chinese_history"
diff --git a/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml b/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml
new file mode 100644
index 00000000..0c7afea6
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "operating_system"
+"description": "以下是中国关于操作系统的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_operating_system"
diff --git a/lm_eval/tasks/ceval/ceval-valid_physician.yaml b/lm_eval/tasks/ceval/ceval-valid_physician.yaml
new file mode 100644
index 00000000..66abd59f
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_physician.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "physician"
+"description": "以下是中国关于医师资格的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_physician"
diff --git a/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml b/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml
new file mode 100644
index 00000000..6d73e014
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "plant_protection"
+"description": "以下是中国关于植物保护的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_plant_protection"
diff --git a/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml b/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml
new file mode 100644
index 00000000..82d1fcbb
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "probability_and_statistics"
+"description": "以下是中国关于概率统计的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_probability_and_statistics"
diff --git a/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml b/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml
new file mode 100644
index 00000000..3e670fda
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_tour_guide"
+"description": "以下是中国关于导游资格的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_professional_tour_guide"
diff --git a/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml b/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml
new file mode 100644
index 00000000..e4747399
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "sports_science"
+"description": "以下是中国关于体育学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_sports_science"
diff --git a/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml b/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml
new file mode 100644
index 00000000..8c461a34
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "tax_accountant"
+"description": "以下是中国关于税务师的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_tax_accountant"
diff --git a/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml b/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml
new file mode 100644
index 00000000..ca08e24a
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "teacher_qualification"
+"description": "以下是中国关于教师资格的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_teacher_qualification"
diff --git a/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml b/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml
new file mode 100644
index 00000000..957a53fb
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "urban_and_rural_planner"
+"description": "以下是中国关于注册城乡规划师的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_urban_and_rural_planner"
diff --git a/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml b/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml
new file mode 100644
index 00000000..a493fd65
--- /dev/null
+++ b/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "veterinary_medicine"
+"description": "以下是中国关于兽医学的单项选择题,请选出其中的正确答案。\n\n"
+"include": "_default_ceval_yaml"
+"task": "ceval-valid_veterinary_medicine"
--
GitLab

From d1c189eaebad807e0d8a7b6ab1d8bbe15d55126c Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Mon, 18 Sep 2023 08:23:14 +0000
Subject: [PATCH 186/264] remove scrolls from this PR

---
 lm_eval/tasks/scrolls/README.md               | 84 ------
 .../tasks/scrolls/contractnli/default.yaml    |  9 --
 lm_eval/tasks/scrolls/govreport/default.yaml  |  4 -
 lm_eval/tasks/scrolls/metrics.py              | 34 --------
 .../tasks/scrolls/narrativeqa/default.yaml    | 16 ----
 lm_eval/tasks/scrolls/preprocessors.py        | 77 -----------------
 lm_eval/tasks/scrolls/pruned_tasks.py         |  4 -
 .../tasks/scrolls/qasper/boolean_task.yaml    | 15 ----
 .../tasks/scrolls/qasper/freeform_task.yaml   | 14 ----
 lm_eval/tasks/scrolls/qasper/utils.py         | 25 ------
 lm_eval/tasks/scrolls/qmsum/default.yaml      |  5 --
 lm_eval/tasks/scrolls/quality/default.yaml    |  7 --
 lm_eval/tasks/scrolls/quality/utils.py        | 29 -------
 .../scrolls/scroll_multiplechoice_task_yaml   | 11 ---
 .../tasks/scrolls/scroll_summary_task_yaml    | 23 -----
 .../tasks/scrolls/summscreenfd/default.yaml   |  4 -
 16 files changed, 361 deletions(-)
 delete mode 100644 lm_eval/tasks/scrolls/README.md
 delete mode 100644 lm_eval/tasks/scrolls/contractnli/default.yaml
 delete mode 100644 lm_eval/tasks/scrolls/govreport/default.yaml
 delete mode 100644 lm_eval/tasks/scrolls/metrics.py
 delete mode 100644 lm_eval/tasks/scrolls/narrativeqa/default.yaml
 delete mode 100644 lm_eval/tasks/scrolls/preprocessors.py
 delete mode 100644 lm_eval/tasks/scrolls/pruned_tasks.py
 delete mode 100644 lm_eval/tasks/scrolls/qasper/boolean_task.yaml
 delete mode 100644 lm_eval/tasks/scrolls/qasper/freeform_task.yaml
 delete mode 100644 lm_eval/tasks/scrolls/qasper/utils.py
 delete mode 100644 lm_eval/tasks/scrolls/qmsum/default.yaml
 delete mode 100644 lm_eval/tasks/scrolls/quality/default.yaml
 delete mode 100644 lm_eval/tasks/scrolls/quality/utils.py
 delete mode 100644 lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml
 delete mode 100644 lm_eval/tasks/scrolls/scroll_summary_task_yaml
 delete mode 100644 lm_eval/tasks/scrolls/summscreenfd/default.yaml

diff --git a/lm_eval/tasks/scrolls/README.md b/lm_eval/tasks/scrolls/README.md
deleted file mode 100644
index 193a1179..00000000
--- a/lm_eval/tasks/scrolls/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# SCROLLS
-
-### Paper
-
-Title: `SCROLLS: Standardized CompaRison Over Long Language Sequences`
-
-Abstract: https://arxiv.org/abs/2201.03533
-
-SCROLLS is a suite of datasets that require synthesizing information over long texts.
-The benchmark includes seven natural language tasks across multiple domains,
-including summarization, question answering, and natural language inference.
-
-Homepage: https://www.scrolls-benchmark.com/
-
-Since SCROLLS tasks are generally longer than the maximum sequence length of many models,
-it is possible to create "subset" tasks that contain only those samples whose tokenized length
-is less than some pre-defined limit. For example, to create a subset of "Qasper" that would
-be suitable for a model using the GPTNeoX tokenizer and a 4K maximium sequence length:
-
-```
-class QasperGPTNeoX4K(Qasper):
-    PRUNE_TOKENIZERS = ["EleutherAI/pythia-410m-deduped"]
-    PRUNE_MAX_TOKENS = 4096
-    PRUNE_NUM_PROC = _num_cpu_cores() # optional, to speed up pruning of large datasets like NarrativeQA
-```
-
-`PRUNE_TOKENIZERS` can contain more than one tokenizer; this will include only samples that are
-less than `PRUNE_MAX_TOKENS` for ALL of the tokenizers. This can be useful to comparing models
-that use different tokenizers but the same maximum sequence length.
-
-Once the subset task class has been defined in this file, it can be used by adding the class
-to `lm_eval/tasks/__init__.py`.
-
-NOTE: GovReport may need `max_gen_toks` set larger for causal models.
-
-### Citation
-
-```
-@inproceedings{shaham-etal-2022-scrolls,
-    title = "{SCROLLS}: Standardized {C}ompa{R}ison Over Long Language Sequences",
-    author = "Shaham, Uri  and
-      Segal, Elad  and
-      Ivgi, Maor  and
-      Efrat, Avia  and
-      Yoran, Ori  and
-      Haviv, Adi  and
-      Gupta, Ankit  and
-      Xiong, Wenhan  and
-      Geva, Mor  and
-      Berant, Jonathan  and
-      Levy, Omer",
-    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
-    month = dec,
-    year = "2022",
-    address = "Abu Dhabi, United Arab Emirates",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/2022.emnlp-main.823",
-    pages = "12007--12021"
-}
-```
-
-### Groups and Tasks
-
-#### Groups
-
-* `qasper`: executes both `qasper_bool` and `qasper_freeform`
-
-#### Tasks
-
-* `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"`
-* `qasper_freeform`: Greedy generation task that evaluates the samples from the task with `answer_type="free form answer"`
-
-### Checklist
-
-For adding novel benchmarks/datasets to the library:
-* [ ] Is the task an existing benchmark in the literature?
-  * [ ] Have you referenced the original paper that introduced the task?
-  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
-
-
-If other tasks on this dataset are already supported:
-* [ ] Is the "Main" variant of this task clearly denoted?
-* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
-* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm_eval/tasks/scrolls/contractnli/default.yaml b/lm_eval/tasks/scrolls/contractnli/default.yaml
deleted file mode 100644
index fc446dd5..00000000
--- a/lm_eval/tasks/scrolls/contractnli/default.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-include: ../scroll_multiplechoice_task_yaml
-task: scrolls_contractnli
-dataset_name: contract_nli
-process_docs: !function ../preprocessors.process_docs_prepended_question
-doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:"
-doc_to_target: "{{outputs[0]}}"
-doc_to_choice: ["Not mentioned", "Entailment", "Contradiction"]
-should_decontaminate: true
-doc_to_decontamination_query: input
diff --git a/lm_eval/tasks/scrolls/govreport/default.yaml b/lm_eval/tasks/scrolls/govreport/default.yaml
deleted file mode 100644
index 80e0246b..00000000
--- a/lm_eval/tasks/scrolls/govreport/default.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-include: ../scroll_summary_task_yaml
-task: scrolls_govreport
-dataset_path: tau/scrolls
-dataset_name: gov_report
diff --git a/lm_eval/tasks/scrolls/metrics.py b/lm_eval/tasks/scrolls/metrics.py
deleted file mode 100644
index adb437bd..00000000
--- a/lm_eval/tasks/scrolls/metrics.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import evaluate
-
-rouge_fn = evaluate.load('rouge')
-
-def rouge1(predictions, references):
-    results = rouge_fn.compute(predictions=predictions, references=references)
-    return results['rouge1']
-
-def rouge2(predictions, references):
-    results = rouge_fn.compute(predictions=predictions, references=references)
-    return results['rouge2']
-
-def rougeL(predictions, references):
-    results = rouge_fn.compute(predictions=predictions, references=references)
-    return results['rougeL']
-
-squad_metric = evaluate.load("squad_v2")
-
-def agg_f1(samples):
-    predictions, references = zip(*samples)  # unzip, if you will
-    computed = squad_metric.compute(predictions=predictions, references=references)
-    return computed["f1"]
-
-
-def _download_metric():
-    import os
-    import shutil
-    from huggingface_hub import hf_hub_download
-    scrolls_metric_path = hf_hub_download(repo_id="tau/scrolls", repo_type="dataset", filename="metrics/scrolls.py")
-    updated_scrolls_metric_path = (
-        os.path.dirname(scrolls_metric_path) + os.path.basename(scrolls_metric_path).replace(".", "_") + ".py"
-    )
-    shutil.copy(scrolls_metric_path, updated_scrolls_metric_path)
-    return updated_scrolls_metric_path
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/narrativeqa/default.yaml b/lm_eval/tasks/scrolls/narrativeqa/default.yaml
deleted file mode 100644
index db172672..00000000
--- a/lm_eval/tasks/scrolls/narrativeqa/default.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-include: ../scroll_multiplechoice_task_yaml
-task: scrolls_narrativeqa
-dataset_name: narrative_qa
-output_type: greedy_until
-process_docs: !function ../preprocessors.process_docs_prepended_question
-doc_to_text: "{{text}}\n\nQuestion: {{question}}\nAnswer:"
-doc_to_target: "{{outputs| join(', ')}}"
-should_decontaminate: true
-doc_to_decontamination_query: input
-metric_list:
-  - metric: f1
-    aggregation: !function ../metrics.agg_f1
-    higher_is_better: true
-generation_kwargs:
-  until:
-    - "\n"
diff --git a/lm_eval/tasks/scrolls/preprocessors.py b/lm_eval/tasks/scrolls/preprocessors.py
deleted file mode 100644
index 4f5df832..00000000
--- a/lm_eval/tasks/scrolls/preprocessors.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from functools import partial
-
-from transformers import AutoTokenizer
-
-def _num_cpu_cores():
-    # https://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-using-python/55423170#55423170
-    try:
-        import psutil
-        return psutil.cpu_count(logical=False)
-    except ImportError:
-        import os
-        return len(os.sched_getaffinity(0))
-
-def process_docs(dataset, custom_process=None, PRUNE_TOKENIZERS=[], PRUNE_MAX_TOKENS=4096, PRUNE_NUM_PROC=_num_cpu_cores()):
-
-    def _drop_duplicates_in_input(untokenized_dataset):
-        # from scrolls/evaluator/dataset_evaluator.py
-
-        indices_to_keep = []
-        id_to_idx = {}
-        outputs = []
-        for i, (id_, output) in enumerate(zip(untokenized_dataset["id"], untokenized_dataset["output"])):
-            if id_ in id_to_idx:
-                outputs[id_to_idx[id_]].append(output)
-                continue
-            indices_to_keep.append(i)
-            id_to_idx[id_] = len(outputs)
-            outputs.append([output])
-        untokenized_dataset = untokenized_dataset.select(indices_to_keep).flatten_indices()
-        untokenized_dataset = untokenized_dataset.remove_columns("output")
-        untokenized_dataset = untokenized_dataset.add_column("outputs", outputs)
-        return untokenized_dataset
-
-    dataset = _drop_duplicates_in_input(dataset)
-    if custom_process is not None:
-        dataset = dataset.map(custom_process)
-
-    if len(PRUNE_TOKENIZERS) > 0:
-        tokenizers = [AutoTokenizer.from_pretrained(tokenizer) for tokenizer in PRUNE_TOKENIZERS]
-        cache = {}
-
-        def _get_prune_text(doc):
-            return doc_to_text(doc)
-
-        def _filter(sample):
-            text = _get_prune_text(sample)
-            cached = cache.get(text, None)
-            if cached is None:
-                for tokenizer in tokenizers:
-                    if len(tokenizer(text).input_ids) > PRUNE_MAX_TOKENS:
-                        cache[text] = False
-                        return False
-                cache[text] = True
-                return True
-            else:
-                return cached
-
-        dataset = dataset.filter(_filter, num_proc=PRUNE_NUM_PROC)
-
-    return dataset
-
-def _doc_prepended_question(doc):
-    # "When a query is given in addition to the raw text (as
-    # in QMSum, Qasper, NarrativeQA, QuALITY, and ContractNLI),
-    # we prepend it to the text, using two newlines as a natural separator"
-    input = doc["input"]
-    split = input.find("\n\n")
-    return {
-        "id": doc["id"],
-        "pid": doc["pid"],
-        "input": input,
-        "outputs": doc["outputs"],
-        "question": input[0:split],
-        "text": input[split + 2:]
-    }
-
-process_docs_prepended_question = partial(process_docs, custom_process=_doc_prepended_question)
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/pruned_tasks.py b/lm_eval/tasks/scrolls/pruned_tasks.py
deleted file mode 100644
index b0a06377..00000000
--- a/lm_eval/tasks/scrolls/pruned_tasks.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from functools import partial
-from preprocessors import _doc_prepended_question
-
-process_docs_prepended_questionGPTNeoX4K = partial(process_docs, custom_process=_doc_prepended_question)
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/qasper/boolean_task.yaml b/lm_eval/tasks/scrolls/qasper/boolean_task.yaml
deleted file mode 100644
index f765d1b8..00000000
--- a/lm_eval/tasks/scrolls/qasper/boolean_task.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-group: scrolls
-task: scrolls_qasper_boolean
-dataset_path: tau/scrolls
-dataset_name: qasper
-output_type: multiple_choice
-training_split: train
-validation_split: validation
-process_docs: !function ../preprocessors.process_docs_prepended_question
-doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:"
-doc_to_target: "{{outputs[0]}}"
-doc_to_choice: ["yes", "no"]
-should_decontaminate: true
-doc_to_decontamination_query: input
-metric_list:
-  - metric: f1
diff --git a/lm_eval/tasks/scrolls/qasper/freeform_task.yaml b/lm_eval/tasks/scrolls/qasper/freeform_task.yaml
deleted file mode 100644
index 1e3c64a4..00000000
--- a/lm_eval/tasks/scrolls/qasper/freeform_task.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-group: scrolls
-task: scrolls_qasper_freeform
-dataset_path: tau/scrolls
-dataset_name: qasper
-output_type: greedy_until
-training_split: train
-validation_split: validation
-process_docs: !function ../preprocessors.process_docs_prepended_question
-doc_to_text: "{{text}}\n\nHypothesis: {{question}}\nConclusion:"
-doc_to_target: "{{outputs[0]}}"
-should_decontaminate: true
-doc_to_decontamination_query: input
-metric_list:
-  - metric: f1
diff --git a/lm_eval/tasks/scrolls/qasper/utils.py b/lm_eval/tasks/scrolls/qasper/utils.py
deleted file mode 100644
index 49963b6d..00000000
--- a/lm_eval/tasks/scrolls/qasper/utils.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import transformers.data.metrics.squad_metrics as squad_metrics
-
-def process_docs(dataset):
-
-    dataset = process_docs_prepended_question(dataset)
-
-    def _process_doc(doc):
-
-        doc["is_yes_no"] = reduce(lambda prev, cur: prev and squad_metrics.normalize_answer(cur)
-                                  in ["yes", "no"], doc["outputs"], True)
-
-        return doc
-
-    return dataset.map(_process_doc)
-
-def process_results(doc, results):
-    if doc["is_yes_no"]:
-        prediction = " yes" if results[0] > results[1] else " no"
-    elif len(results[0].strip()) == 0:
-        prediction = "Unanswerable"
-    else:
-        prediction = results[0]
-    return {
-        "f1": (prediction, doc["outputs"])
-    }
\ No newline at end of file
diff --git a/lm_eval/tasks/scrolls/qmsum/default.yaml b/lm_eval/tasks/scrolls/qmsum/default.yaml
deleted file mode 100644
index a5caf807..00000000
--- a/lm_eval/tasks/scrolls/qmsum/default.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-include: ../scroll_summary_task_yaml
-task: scrolls_qmsum
-dataset_path: tau/scrolls
-dataset_name: qmsum
-process_docs: !function ../preprocessors.process_docs_prepended_question
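An aside on the prepended-question convention implemented by the `_doc_prepended_question` helper deleted above — a minimal sketch (the sample text is invented) of the split on the first blank line:

```python
# SCROLLS packs "query\n\ndocument" into a single `input` field; the helper
# recovers the two parts by splitting at the first double newline.
sample_input = "What is the main finding?\n\nWe study long-document QA ..."

split = sample_input.find("\n\n")
question = sample_input[0:split]
text = sample_input[split + 2:]

assert question == "What is the main finding?"
assert text.startswith("We study")
```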
diff --git a/lm_eval/tasks/scrolls/quality/default.yaml b/lm_eval/tasks/scrolls/quality/default.yaml
deleted file mode 100644
index b05e1196..00000000
--- a/lm_eval/tasks/scrolls/quality/default.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-include: ../scroll_multiplechoice_task_yaml
-task: scrolls_quality
-dataset_name: quality
-process_docs: !function utils.process_docs
-doc_to_text: "{{text}}\n\nQuestion: {{question}}\nAnswer:"
-doc_to_target: gold
-doc_to_choice: "{{choices}}"
diff --git a/lm_eval/tasks/scrolls/quality/utils.py b/lm_eval/tasks/scrolls/quality/utils.py
deleted file mode 100644
index d2d7eea5..00000000
--- a/lm_eval/tasks/scrolls/quality/utils.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import re
-from functools import partial
-
-import sys
-sys.path.append('..')
-from preprocessors import process_docs_prepended_question
-
-def process_docs(dataset):
-
-    dataset = process_docs_prepended_question(dataset)
-
-    _multiple_choice_pattern = re.compile(r" *\([A-D]\) *")
-
-    def _normalize_answer(text):
-        return " ".join(text.split()).strip()
-
-    def _process_doc(doc):
-
-        split = doc["text"].find("\n\n", doc["text"].find("(D)"))
-        choices_text = doc["text"][:split]
-
-        doc["text"] = doc["text"][split:].strip()
-        doc["choices"] = [_normalize_answer(choice) for choice in re.split(
-            _multiple_choice_pattern, choices_text)[1:]]
-        doc["gold"] = doc["choices"].index(_normalize_answer(doc["outputs"][0]))
-
-        return doc
-
-    return dataset.map(_process_doc)
diff --git a/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml b/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml
deleted file mode 100644
index 0d29e639..00000000
--- a/lm_eval/tasks/scrolls/scroll_multiplechoice_task_yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-group: scrolls
-dataset_path: tau/scrolls
-output_type: multiple_choice
-training_split: train
-validation_split: validation
-process_docs: !function preprocessors.process_docs
-should_decontaminate: true
-doc_to_decontamination_query: input
-metric_list:
-  - metric: acc
-  - metric: acc_norm
diff --git a/lm_eval/tasks/scrolls/scroll_summary_task_yaml b/lm_eval/tasks/scrolls/scroll_summary_task_yaml
deleted file mode 100644
index a464ccab..00000000
--- a/lm_eval/tasks/scrolls/scroll_summary_task_yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-group: scrolls
-dataset_path: tau/scrolls
-output_type: greedy_until
-training_split: train
-validation_split: validation
-process_docs: !function preprocessors.process_docs
-doc_to_text: "{{input}}\n\nQuestion: What is a summary of the preceding text?\nAnswer:"
-doc_to_target: "{{outputs|join(', ')}}"
-should_decontaminate: true
-doc_to_decontamination_query: input
-metric_list:
-  - metric: !function metrics.rouge1
-    aggregation: mean
-    higher_is_better: true
-  - metric: !function metrics.rouge2
-    aggregation: mean
-    higher_is_better: true
-  - metric: !function metrics.rougeL
-    aggregation: mean
-    higher_is_better: true
-generation_kwargs:
-  until:
-    - "\n"
diff --git a/lm_eval/tasks/scrolls/summscreenfd/default.yaml b/lm_eval/tasks/scrolls/summscreenfd/default.yaml
deleted file mode 100644
index 7ecabfae..00000000
--- a/lm_eval/tasks/scrolls/summscreenfd/default.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-include: ../scroll_summary_task_yaml
-task: scrolls_summscreenfd
-dataset_path: tau/scrolls
-dataset_name: summ_screen_fd
--
GitLab
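Another aside, on the `quality/utils.py` preprocessor removed above: a quick sketch (the choices string is invented) of how its `(A)`-style regex split recovers the answer options:

```python
import re

_multiple_choice_pattern = re.compile(r" *\([A-D]\) *")

choices_text = "(A) red (B) green (C) blue (D) yellow"
# re.split leaves an empty string before the first "(A)", hence the [1:].
print(re.split(_multiple_choice_pattern, choices_text)[1:])
# ['red', 'green', 'blue', 'yellow']
```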
From 11be7c939f2d0d55df689d1d6197544820ae7529 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Mon, 18 Sep 2023 08:27:56 +0000
Subject: [PATCH 187/264] format

---
 lm_eval/tasks/mutual/utils.py   |  2 +-
 lm_eval/tasks/qasper/README.md  |  2 +-
 lm_eval/tasks/qasper/metrics.py |  5 +++++
 lm_eval/tasks/qasper/utils.py   | 17 ++++++++---------
 4 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/lm_eval/tasks/mutual/utils.py b/lm_eval/tasks/mutual/utils.py
index 46d3ce4b..972ffec1 100644
--- a/lm_eval/tasks/mutual/utils.py
+++ b/lm_eval/tasks/mutual/utils.py
@@ -1,7 +1,7 @@
 import numpy as np
 
 
-def process_docs(dataset): 
+def process_docs(dataset):
     def _detokenize(text):
         text = text.replace(" '", "'")
         text = text.replace(" \n", "\n")
diff --git a/lm_eval/tasks/qasper/README.md b/lm_eval/tasks/qasper/README.md
index 647d54ea..ada111e1 100644
--- a/lm_eval/tasks/qasper/README.md
+++ b/lm_eval/tasks/qasper/README.md
@@ -46,7 +46,7 @@ Homepage: https://allenai.org/data/qasper
 
 #### Tasks
 
-* `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"` 
+* `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"`
 * `qasper_freeform`: Greedy generation task that evaluates the samples from the task with `answer_type="free form answer"`
 
 ### Checklist
diff --git a/lm_eval/tasks/qasper/metrics.py b/lm_eval/tasks/qasper/metrics.py
index d4c08670..f4de05e0 100644
--- a/lm_eval/tasks/qasper/metrics.py
+++ b/lm_eval/tasks/qasper/metrics.py
@@ -1,5 +1,9 @@
+import re
 import string
 
+from collections import Counter
+
+
 def normalize_answer(s):
     """
     Taken from the official evaluation script for v1.1 of the SQuAD dataset.
@@ -21,6 +25,7 @@ def normalize_answer(s):
 
     return white_space_fix(remove_articles(remove_punc(lower(s))))
 
+
 def f1_abstractive(predictions, references):
     """
     Taken from the official evaluation script for v1.1 of the SQuAD dataset.
diff --git a/lm_eval/tasks/qasper/utils.py b/lm_eval/tasks/qasper/utils.py
index 537a2ab2..be6f79dc 100644
--- a/lm_eval/tasks/qasper/utils.py
+++ b/lm_eval/tasks/qasper/utils.py
@@ -1,15 +1,10 @@
 from datasets import Dataset
 from functools import partial
 
+
 def process_docs(dataset, set_answer_type="bool"):
-    FEATURES = [
-        "title",
-        "abstract",
-        "question",
-        "answer",
-        "answer_type"
-    ]
+    FEATURES = ["title", "abstract", "question", "answer", "answer_type"]
 
     def _categorise_answer(answer_blob):
         if answer_blob["unanswerable"]:
@@ -62,12 +57,16 @@ def process_docs(dataset, set_answer_type="bool"):
 
         return obs_list
 
-    dataset = dataset.map(_flatten, remove_columns=[key for key in dataset.features.keys() if key not in FEATURES])
+    dataset = dataset.map(
+        _flatten,
+        remove_columns=[key for key in dataset.features.keys() if key not in FEATURES],
+    )
     new_dataset = {}
 
     for key in dataset.features.keys():
         new_dataset[key] = [x for row in dataset[key] for x in row]
-    
+
     return Dataset.from_dict(new_dataset)
 
+
 process_docs_bool = partial(process_docs, set_answer_type="bool")
 process_docs_freeform = partial(process_docs, set_answer_type="free form answer")
--
GitLab

From 068155782c473e4017e69c42d687fa629a1bba6c Mon Sep 17 00:00:00 2001
From: baberabb <92168766+baberabb@users.noreply.github.com>
Date: Mon, 18 Sep 2023 21:58:59 +0500
Subject: [PATCH 188/264] added `sympy`, `antlr4` dependencies for
 `minerva_math`

---
 pyproject.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index c85d6946..d24305e1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,6 +36,8 @@ dependencies = [
     "tqdm-multiprocess",
     "transformers>=4.1",
     "zstandard",
+    "sympy",
+    "antlr4-python3-runtime >= 4.11, < 4.12"
 ]
 
 [tool.setuptools]
--
GitLab
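The sympy/antlr4 pair added above exists because `minerva_math` compares answers through sympy's LaTeX parser, which is generated from an ANTLR grammar and only works with a matching `antlr4-python3-runtime`. A small sanity-check sketch of that code path:

```python
from sympy.parsing.latex import parse_latex

# parse_latex raises ImportError when antlr4-python3-runtime is missing or
# does not match the grammar sympy ships with -- hence the `< 4.12` pin.
expr = parse_latex(r"\frac{1}{2} + \frac{1}{3}")
print(expr)             # 1/2 + 1/3
print(expr.simplify())  # 5/6
```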
From c6dc1ca4068021c68052b5a6c694de10efde1562 Mon Sep 17 00:00:00 2001
From: Hojjat Mokhtarabadi
Date: Mon, 18 Sep 2023 21:04:50 +0330
Subject: [PATCH 189/264] changes to promptsource output_type reverted

---
 lm_eval/tasks/__init__.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py
index 53fae225..6e10ec95 100644
--- a/lm_eval/tasks/__init__.py
+++ b/lm_eval/tasks/__init__.py
@@ -60,7 +60,8 @@ def check_prompt_config(config: dict[str, str]) -> List[dict[str, str]]:
                             prompt_variation,
                         ]
                     )
-                }
+                },
+                **{"output_type": "greedy_until"},
             }
         )
     else:
--
GitLab

From f4c1b9d6bfb698473acb34dadc546f69697736ef Mon Sep 17 00:00:00 2001
From: baberabb <92168766+baberabb@users.noreply.github.com>
Date: Mon, 18 Sep 2023 23:14:05 +0500
Subject: [PATCH 190/264] sympy v. particular with dependency requirements

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index d24305e1..a73505d8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,7 +36,7 @@ dependencies = [
     "tqdm-multiprocess",
     "transformers>=4.1",
     "zstandard",
-    "sympy",
+    "sympy>=1.12",
     "antlr4-python3-runtime >= 4.11, < 4.12"
 ]
 
--
GitLab

From 30cedf0cfe092bce64f49ec865842566feb3c3c3 Mon Sep 17 00:00:00 2001
From: baberabb <92168766+baberabb@users.noreply.github.com>
Date: Mon, 18 Sep 2023 23:32:00 +0500
Subject: [PATCH 191/264] type annotations 3.8 compatible

---
 lm_eval/tasks/minerva_math/utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lm_eval/tasks/minerva_math/utils.py b/lm_eval/tasks/minerva_math/utils.py
index 3f9709a9..0987fe62 100644
--- a/lm_eval/tasks/minerva_math/utils.py
+++ b/lm_eval/tasks/minerva_math/utils.py
@@ -4,7 +4,7 @@ import sympy
 from sympy.parsing.latex import parse_latex
 import signal
 from lm_eval.logger import eval_logger
-from typing import Optional
+from typing import Optional, List, Dict
 
 
 # taken from
@@ -69,7 +69,7 @@ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
     return dataset.map(_process_doc)
 
 
-def process_results(doc: dict, results: list[str]) -> dict[str, int]:
+def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
     candidates = results[0]
 
     unnormalized_answer = get_unnormalized_answer(candidates)
--
GitLab

From 78a33749ce9882ba4403efd66789e8669a16fd9f Mon Sep 17 00:00:00 2001
From: baberabb <92168766+baberabb@users.noreply.github.com>
Date: Mon, 18 Sep 2023 23:34:13 +0500
Subject: [PATCH 192/264] update readme

---
 lm_eval/tasks/minerva_math/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lm_eval/tasks/minerva_math/README.md b/lm_eval/tasks/minerva_math/README.md
index 2b1daf70..fb2845e2 100644
--- a/lm_eval/tasks/minerva_math/README.md
+++ b/lm_eval/tasks/minerva_math/README.md
@@ -61,9 +61,9 @@ For adding novel benchmarks/datasets to the library:
 
 If other tasks on this dataset are already supported:
 
-* [ ] Is the "Main" variant of this task clearly denoted?
-* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
-* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+* [x] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
 
 ### Variant Wishlist
--
GitLab

From 0e75f58c98aa219cef860bb744c49b550a78d2b2 Mon Sep 17 00:00:00 2001
From: baberabb <92168766+baberabb@users.noreply.github.com>
Date: Tue, 19 Sep 2023 02:42:44 +0500
Subject: [PATCH 193/264] raise exception if dependencies not met

---
 lm_eval/tasks/minerva_math/utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lm_eval/tasks/minerva_math/utils.py b/lm_eval/tasks/minerva_math/utils.py
index 0987fe62..87684765 100644
--- a/lm_eval/tasks/minerva_math/utils.py
+++ b/lm_eval/tasks/minerva_math/utils.py
@@ -181,6 +181,9 @@ def is_equiv(x1: str, x2: str) -> bool:
     except TimeoutError:
         eval_logger.debug(f"Timed out comparing {x1} and {x2}")
         return False
+    except ImportError as e:
+        eval_logger.error(e)
+        raise
     except Exception as e:
         eval_logger.debug(f"Failed comparing {x1} and {x2} with {e}")
         return False
--
GitLab

From 8daee3425f0a9e84917e32d0bfd92fd0759a34d6 Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 19 Sep 2023 10:23:44 +0700
Subject: [PATCH 194/264] Update __init__.py

---
 lm_eval/tasks/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py
index 6e10ec95..6828b233 100644
--- a/lm_eval/tasks/__init__.py
+++ b/lm_eval/tasks/__init__.py
@@ -61,7 +61,7 @@ def check_prompt_config(config: dict[str, str]) -> List[dict[str, str]]:
                         ]
                     )
                 },
-                **{"output_type": "greedy_until"}
+                **{"output_type": "greedy_until"},
             }
         )
     else:
--
GitLab

From 0f3f1267be0f80290d5fd7ef43237fc3c1ed734b Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 19 Sep 2023 10:37:25 +0700
Subject: [PATCH 195/264] Update write_out.py

---
 scripts/write_out.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/write_out.py b/scripts/write_out.py
index e7288f2d..ccae75f6 100644
--- a/scripts/write_out.py
+++ b/scripts/write_out.py
@@ -48,7 +48,7 @@ def main():
 
     docs = join_iters(iters)
 
-    with open(os.path.join(args.output_base_path, task_name), "w", encoding='utf8') as f:
+    with open(os.path.join(args.output_base_path, task_name), "w", encoding="utf8") as f:
         for i, doc in (
             zip(range(args.num_examples), docs)
             if args.num_examples > 0
--
GitLab

From a89b132dc4e096165ca9591e13bd4c63b48a9f82 Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 19 Sep 2023 11:26:59 +0700
Subject: [PATCH 196/264] Update write_out.py

---
 scripts/write_out.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scripts/write_out.py b/scripts/write_out.py
index ccae75f6..f338c865 100644
--- a/scripts/write_out.py
+++ b/scripts/write_out.py
@@ -48,7 +48,9 @@ def main():
 
     docs = join_iters(iters)
 
-    with open(os.path.join(args.output_base_path, task_name), "w", encoding="utf8") as f:
+    with open(
+        os.path.join(args.output_base_path, task_name), "w", encoding="utf8"
+    ) as f:
         for i, doc in (
             zip(range(args.num_examples), docs)
             if args.num_examples > 0
--
GitLab

From 1ed76cfa7213ebf7bd73a05baa2be089b7023581 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Tue, 19 Sep 2023 09:32:50 +0000
Subject: [PATCH 197/264] moved benchmarks back to tasks/

---
 lm_eval/benchmarks/__init__.py              | 63 -------------
 lm_eval/evaluator.py                        |  1 -
 lm_eval/tasks/__init__.py                   | 43 ++++++++-
 lm_eval/{ => tasks}/benchmarks/pythia.yaml  |  0
 lm_eval/{ => tasks}/benchmarks/t0_eval.yaml |  0
 lm_eval/tasks/squadv2/default.yaml          | 44 +++------
 lm_eval/tasks/squadv2/utils.py              | 99 ++++++++-------------
 main.py                                     |  2 +-
 8 files changed, 88 insertions(+), 164 deletions(-)
 delete mode 100644 lm_eval/benchmarks/__init__.py
 rename lm_eval/{ => tasks}/benchmarks/pythia.yaml (100%)
 rename lm_eval/{ => tasks}/benchmarks/t0_eval.yaml (100%)

diff --git a/lm_eval/benchmarks/__init__.py b/lm_eval/benchmarks/__init__.py
deleted file mode 100644
index 4b224d2b..00000000
--- a/lm_eval/benchmarks/__init__.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-import yaml
-
-from lm_eval import utils
-from lm_eval.tasks import register_configurable_task, check_prompt_config
-from lm_eval.logger import eval_logger
-from lm_eval.api.registry import (
-    TASK_REGISTRY,
-    GROUP_REGISTRY,
-    ALL_TASKS,
-)
-
-
-def include_benchmarks(task_dir: str) -> None:
-    for root, subdirs, file_list in os.walk(task_dir):
-        if (subdirs == [] or subdirs == ["__pycache__"]) and (len(file_list) > 0):
-            for f in file_list:
-                if f.endswith(".yaml"):
-                    try:
-                        benchmark_path = os.path.join(root, f)
-
-                        with open(benchmark_path, "rb") as file:
-                            yaml_config = yaml.full_load(file)
-
-                        assert "group" in yaml_config
-                        group = yaml_config["group"]
-                        all_task_list = yaml_config["task"]
-                        config_list = [
-                            task for task in all_task_list if type(task) != str
-                        ]
-                        task_list = [
-                            task for task in all_task_list if type(task) == str
-                        ]
-
-                        for task_config in config_list:
-                            var_configs = check_prompt_config(
-                                {
-                                    **task_config,
-                                    **{"group": group},
-                                }
-                            )
-                            for config in var_configs:
-                                register_configurable_task(config)
-
-                        task_names = utils.pattern_match(task_list, ALL_TASKS)
-                        for task in task_names:
-                            if (task in TASK_REGISTRY) or (task in GROUP_REGISTRY):
-                                if group in GROUP_REGISTRY:
-                                    GROUP_REGISTRY[group].append(task)
-                                else:
-                                    GROUP_REGISTRY[group] = [task]
-                                ALL_TASKS.add(group)
-                    except Exception as error:
-                        eval_logger.warning(
-                            "Failed to load benchmark in\n"
-                            f"                                   {benchmark_path}\n"
-                            "                                   Benchmark will not be added to registry\n"
-                            f"                                   Error: {error}"
-                        )
-
-
-task_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
-include_benchmarks(task_dir)
diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py
index 2975f9d9..8c95d56b 100644
--- a/lm_eval/evaluator.py
+++ b/lm_eval/evaluator.py
@@ -11,7 +11,6 @@ import numpy as np
 
 import lm_eval.api
 import lm_eval.tasks
-import lm_eval.benchmarks
 import lm_eval.models
 import lm_eval.api.metrics
 import lm_eval.api.registry
diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py
index eaa0fbc4..c5749a1b 100644
--- a/lm_eval/tasks/__init__.py
+++ b/lm_eval/tasks/__init__.py
@@ -37,6 +37,37 @@ def register_configurable_task(config: Dict[str, str]) -> int:
     return 0
 
 
+def register_configurable_group(config: Dict[str, str]) -> int:
+    group = config["group"]
+    all_task_list = config["task"]
+    config_list = [
+        task for task in all_task_list if type(task) != str
+    ]
+    task_list = [
+        task for task in all_task_list if type(task) == str
+    ]
+
+    for task_config in config_list:
+        var_configs = check_prompt_config(
+            {
+                **task_config,
+                **{"group": group},
+            }
+        )
+        for config in var_configs:
+            register_configurable_task(config)
+
+    task_names = utils.pattern_match(task_list, ALL_TASKS)
+    for task in task_names:
+        if (task in TASK_REGISTRY) or (task in GROUP_REGISTRY):
+            if group in GROUP_REGISTRY:
+                GROUP_REGISTRY[group].append(task)
+            else:
+                GROUP_REGISTRY[group] = [task]
+            ALL_TASKS.add(group)
+
+    return 0
+
+
 def check_prompt_config(config: Dict[str, str]) -> List[Dict[str, str]]:
     all_configs = []
@@ -87,9 +118,15 @@ def include_task_folder(task_dir: str) -> None:
                     yaml_path = os.path.join(root, f)
                     try:
                         config = utils.load_yaml_config(yaml_path)
-                        all_configs = check_prompt_config(config)
-                        for config in all_configs:
-                            register_configurable_task(config)
+
+                        # If a `task` in config is a list,
+                        # that means it's a benchmark
+                        if type(config["task"]) == list:
+                            register_configurable_group(config)
+                        else:
+                            all_configs = check_prompt_config(config)
+                            for config in all_configs:
+                                register_configurable_task(config)
 
                     except Exception as error:
                         eval_logger.warning(
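For context, `register_configurable_group` consumes benchmark YAMLs whose `task` key is a list, such as the `pythia.yaml`/`t0_eval.yaml` files being moved below. A hypothetical example (task names chosen for illustration) of the shape the new dispatch expects:

```python
import yaml

# A made-up group config: string entries are looked up in the task registry,
# dict entries are registered as new configurable tasks under this group.
group_yaml = """
group: my_benchmark
task:
  - lambada_openai
  - arc_easy
  - task: squadv2
    num_fewshot: 2
"""

config = yaml.full_load(group_yaml)
assert type(config["task"]) == list  # the condition the new branch above tests
```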
diff --git a/lm_eval/benchmarks/pythia.yaml b/lm_eval/tasks/benchmarks/pythia.yaml
similarity index 100%
rename from lm_eval/benchmarks/pythia.yaml
rename to lm_eval/tasks/benchmarks/pythia.yaml
diff --git a/lm_eval/benchmarks/t0_eval.yaml b/lm_eval/tasks/benchmarks/t0_eval.yaml
similarity index 100%
rename from lm_eval/benchmarks/t0_eval.yaml
rename to lm_eval/tasks/benchmarks/t0_eval.yaml
diff --git a/lm_eval/tasks/squadv2/default.yaml b/lm_eval/tasks/squadv2/default.yaml
index c49d3fb8..2bb3029f 100644
--- a/lm_eval/tasks/squadv2/default.yaml
+++ b/lm_eval/tasks/squadv2/default.yaml
@@ -4,44 +4,22 @@ output_type: greedy_until
 training_split: train
 validation_split: validation
 doc_to_text: "Title: {{title}}\n\nBackground: {{context}}\n\nQuestion: {{question}}\n\n Answer:"
-doc_to_target: "{% if answers.text| length > 0 %}{{answers.text}}{% else %}{{['unanswerable']}}{% endif %}"
+doc_to_target: "{% if answers.text| length > 0 %}{{answers.text}}{% else %}{{['']}}{% endif %}"
 target_delimiter: ""
 should_decontaminate: true
 doc_to_decontamination_query: context
-process_results: !function utils.process_results
 generation_kwargs:
   until:
-    - "\n\n"
     - "\n"
-  do_sample: false
-  temperature: 0.0
-filter_list:
-  - name: remove_whitespace
-    filter:
-      - function: remove_whitespace
-      - function: take_first
+# filter_list:
+#   - name: remove_whitespace
+#     filter:
+#       - function: remove_whitespace
+#       - function: take_first
 metric_list:
-  - metric: exact
-    aggregation: !function utils.exact
+  - metric: !function utils.exact
+    aggregation: mean
+    higher_is_better: true
+  - metric: !function utils.f1
+    aggregation: mean
     higher_is_better: true
-  # - metric: f1
-  #   aggregation: mean
-  #   higher_is_better: true
-  # - metric: HasAns_exact
-  #   aggregation: mean
-  #   higher_is_better: true
-  # - metric: HasAns_f1
-  #   aggregation: mean
-  #   higher_is_better: true
-  # - metric: NoAns_exact
-  #   aggregation: mean
-  #   higher_is_better: true
-  # - metric: NoAns_f1
-  #   aggregation: mean
-  #   higher_is_better: true
-  # - metric: best_exact
-  #   aggregation: mean
-  #   higher_is_better: true
-  # - metric: best_f1
-  #   aggregation: mean
-  #   higher_is_better: true
diff --git a/lm_eval/tasks/squadv2/utils.py b/lm_eval/tasks/squadv2/utils.py
index 1a3ac0e8..9c8c6fb1 100644
--- a/lm_eval/tasks/squadv2/utils.py
+++ b/lm_eval/tasks/squadv2/utils.py
@@ -1,68 +1,41 @@
-import evaluate
-
-from math import exp
-from functools import partial
-
-
-def process_results(doc, results):
-
-    continuation = results[0]
-    no_answer_probability = 0  # exp(logprob_unanswerable)
-
-    predictions = {
-        "id": doc["id"],
-        "prediction_text": continuation,
-        "no_answer_probability": no_answer_probability,
-    }
-
-    references = {
-        "id": doc["id"],
-        "answers": doc["answers"],
-    }
-
-    return {
-        "predictions": predictions,
-        "reference": references
-    }
-    # return _squad_metric([predictions], [references])
-    # return {key: value if key in metrics for key, value in score.items()}
-
-
-def _squad_metric(predictions, references):
-    squad_metric = evaluate.load("squad_v2")
-    return squad_metric.compute(predictions=predictions, references=references)
+import re
+import string
+import collections
+
+
+def normalize_answer(s):
+    """Lower text and remove punctuation, articles and extra whitespace."""
+    def remove_articles(text):
+        regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
+        return re.sub(regex, ' ', text)
+    def white_space_fix(text):
+        return ' '.join(text.split())
+    def remove_punc(text):
+        exclude = set(string.punctuation)
+        return ''.join(ch for ch in text if ch not in exclude)
+    def lower(text):
+        return text.lower()
+    return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+def get_tokens(s):
+    if not s: return []
+    return normalize_answer(s).split()
 
 # Exact match (the normalized answer exactly match the gold answer)
-def exact(items):
-    print(items)
-    import sys; sys.exit()
-    predictions, references = zip(*items)
-    return _squad_metric(predictions=predictions, references=references)["exact"]
+def exact(predictions, references):
+    return int(normalize_answer(references[0]) == normalize_answer(predictions[0]))
 
 # The F-score of predicted tokens versus the gold answer
 def f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["f1"]
-
-# Exact match (the normalized answer exactly match the gold answer)
-def HasAns_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["HasAns_exact"]
-
-# The F-score of predicted tokens versus the gold answer
-def HasAns_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["HasAns_f1"]
-
-# Exact match (the normalized answer exactly match the gold answer)
-def NoAns_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["NoAns_exact"]
-
-# The F-score of predicted tokens versus the gold answer
-def NoAns_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["NoAns_f1"]
-
-# Best exact match (with varying threshold)
-def best_exact(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["best_exact"]
-
-# Best F1 (with varying threshold)
-def best_f1(predictions, references):
-    return _squad_metric(predictions=predictions, references=references)["best_f1"]
+    gold_toks = get_tokens(references[0])
+    pred_toks = get_tokens(predictions[0])
+    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
+    num_same = sum(common.values())
+    if len(gold_toks) == 0 or len(pred_toks) == 0:
+        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
+        return int(gold_toks == pred_toks)
+    if num_same == 0:
+        return 0
+    precision = 1.0 * num_same / len(pred_toks)
+    recall = 1.0 * num_same / len(gold_toks)
+    f1 = (2 * precision * recall) / (precision + recall)
+    return f1
diff --git a/main.py b/main.py
index 83e249d6..3b21bd62 100644
--- a/main.py
+++ b/main.py
@@ -11,7 +11,7 @@ from lm_eval import evaluator, utils
 from lm_eval.api.registry import ALL_TASKS
 from lm_eval.logger import eval_logger, SPACING
 from lm_eval.tasks import include_task_folder
-from lm_eval.benchmarks import include_benchmarks
+# from lm_eval.benchmarks import include_benchmarks
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
--
GitLab
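A worked example (not part of the patch) of the token-level scoring the new `squadv2` utils above implement, assuming `exact` and `f1` from that module are importable:

```python
from lm_eval.tasks.squadv2.utils import exact, f1

# normalize_answer drops articles, so "The cat sat" tokenizes to
# ["cat", "sat"] against gold ["cat", "sat", "down"]:
# precision = 2/2, recall = 2/3, F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8
print(exact(["The cat sat"], ["cat sat down"]))  # 0
print(f1(["The cat sat"], ["cat sat down"]))     # 0.8
```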
From 4578ca146a4ff6cc6a8b71b244125741c0bfb308 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Tue, 19 Sep 2023 09:44:19 +0000
Subject: [PATCH 198/264] added additional squad tasks

---
 lm_eval/tasks/squadv2/no_ans.yaml          | 9 +++++++++
 lm_eval/tasks/squadv2/with_noans_prob.yaml | 4 ++++
 2 files changed, 13 insertions(+)
 create mode 100644 lm_eval/tasks/squadv2/no_ans.yaml
 create mode 100644 lm_eval/tasks/squadv2/with_noans_prob.yaml

diff --git a/lm_eval/tasks/squadv2/no_ans.yaml b/lm_eval/tasks/squadv2/no_ans.yaml
new file mode 100644
index 00000000..82d7c477
--- /dev/null
+++ b/lm_eval/tasks/squadv2/no_ans.yaml
@@ -0,0 +1,9 @@
+include: default.yaml
+task: squadv2_noans_loglikelihood
+dataset_path: squad_v2
+output_type: loglikelihood
+training_split: train
+validation_split: validation
+doc_to_target: " unanswerable"
+metric_list:
+  - metric: perplexity
diff --git a/lm_eval/tasks/squadv2/with_noans_prob.yaml b/lm_eval/tasks/squadv2/with_noans_prob.yaml
new file mode 100644
index 00000000..22b6e1ac
--- /dev/null
+++ b/lm_eval/tasks/squadv2/with_noans_prob.yaml
@@ -0,0 +1,4 @@
+group: squadv2_complete
+task:
+  - squadv2
+  - squadv2_noans_loglikelihood
\ No newline at end of file
--
GitLab

From a20094528dc4eb73454801a21afb074f1124f6d6 Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Tue, 19 Sep 2023 09:44:42 +0000
Subject: [PATCH 199/264] format

---
 lm_eval/tasks/__init__.py                  |  9 +++------
 lm_eval/tasks/squadv2/utils.py             | 20 +++++++++++++-----
 lm_eval/tasks/squadv2/with_noans_prob.yaml |  2 +-
 main.py                                    |  1 +
 4 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py
index 64c1a65d..c5749a1b 100644
--- a/lm_eval/tasks/__init__.py
+++ b/lm_eval/tasks/__init__.py
@@ -37,15 +37,12 @@ def register_configurable_task(config: Dict[str, str]) -> int:
     return 0
 
+
 def register_configurable_group(config: Dict[str, str]) -> int:
     group = config["group"]
     all_task_list = config["task"]
-    config_list = [
-        task for task in all_task_list if type(task) != str
-    ]
-    task_list = [
-        task for task in all_task_list if type(task) == str
-    ]
+    config_list = [task for task in all_task_list if type(task) != str]
+    task_list = [task for task in all_task_list if type(task) == str]
 
     for task_config in config_list:
         var_configs = check_prompt_config(
diff --git a/lm_eval/tasks/squadv2/utils.py b/lm_eval/tasks/squadv2/utils.py
index 9c8c6fb1..8c788cc0 100644
--- a/lm_eval/tasks/squadv2/utils.py
+++ b/lm_eval/tasks/squadv2/utils.py
@@ -2,28 +2,38 @@ import re
 import string
 import collections
 
+
 def normalize_answer(s):
     """Lower text and remove punctuation, articles and extra whitespace."""
+
     def remove_articles(text):
-        regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
-        return re.sub(regex, ' ', text)
+        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
+        return re.sub(regex, " ", text)
+
     def white_space_fix(text):
-        return ' '.join(text.split())
+        return " ".join(text.split())
+
     def remove_punc(text):
         exclude = set(string.punctuation)
-        return ''.join(ch for ch in text if ch not in exclude)
+        return "".join(ch for ch in text if ch not in exclude)
+
     def lower(text):
         return text.lower()
+
     return white_space_fix(remove_articles(remove_punc(lower(s))))
 
+
 def get_tokens(s):
-    if not s: return []
+    if not s:
+        return []
     return normalize_answer(s).split()
 
+
 # Exact match (the normalized answer exactly match the gold answer)
 def exact(predictions, references):
     return int(normalize_answer(references[0]) == normalize_answer(predictions[0]))
 
+
 # The F-score of predicted tokens versus the gold answer
 def f1(predictions, references):
     gold_toks = get_tokens(references[0])
@@
 group: squadv2_complete
 task:
   - squadv2
-  - squadv2_noans_loglikelihood
\ No newline at end of file
+  - squadv2_noans_loglikelihood
diff --git a/main.py b/main.py
index 3b21bd62..36bfcd04 100644
--- a/main.py
+++ b/main.py
@@ -11,6 +11,7 @@ from lm_eval import evaluator, utils
 from lm_eval.api.registry import ALL_TASKS
 from lm_eval.logger import eval_logger, SPACING
 from lm_eval.tasks import include_task_folder
+
 # from lm_eval.benchmarks import include_benchmarks

 os.environ["TOKENIZERS_PARALLELISM"] = "false"

--
GitLab

From fa5ca8c361d37a5c67593cca9ebb879200fdb7d5 Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 19 Sep 2023 16:52:29 +0700
Subject: [PATCH 200/264] Create cot_yaml

Forgot this file

---
 lm_eval/tasks/mgsm/native_cot/cot_yaml | 28 ++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 lm_eval/tasks/mgsm/native_cot/cot_yaml

diff --git a/lm_eval/tasks/mgsm/native_cot/cot_yaml b/lm_eval/tasks/mgsm/native_cot/cot_yaml
new file mode 100644
index 00000000..f5cf60d9
--- /dev/null
+++ b/lm_eval/tasks/mgsm/native_cot/cot_yaml
@@ -0,0 +1,28 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: mgsm_cot_native
+dataset_path: juletxara/mgsm
+dataset_name: null # Overridden by language-specific config.
+output_type: greedy_until
+training_split: train
+test_split: test
+generation_kwargs:
+  until:
+    - "\n\n"
+    - "\n"
+  do_sample: false
+  temperature: 0.0
+target_delimiter: " "
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+filter_list:
+  - name: "get-answer"
+    filter:
+      - function: "regex"
+        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
+      - function: "take_first"

--
GitLab

From feedaf085ebbb451d0e48e661e4811a3e5bd1429 Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 19 Sep 2023 16:56:25 +0700
Subject: [PATCH 201/264] Update mgsm_cot_native_bn.yaml

---
 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml
index c4918f5c..76e52a7c 100644
--- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml
+++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_bn.yaml
@@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[16+1]}}{% else %}{{answer_nu
 doc_to_text: '{% if answer is not none %}{{question+"\nধাপে ধাপে উত্তর:"}}{% else %}{{"প্রশ্ন: "+question+"\nধাপে ধাপে উত্তর:"}}{% endif %}'
 include: cot_yaml
-task: mgsm_bn_direct
+task: mgsm_bn_native_cot

--
GitLab

From 17190841c147e9667d6a00596570aefeb02ab8be Mon Sep 17 00:00:00 2001
From: Lintang Sutawika
Date: Tue, 19 Sep 2023 17:06:59 +0700
Subject: [PATCH 202/264] Update mgsm_cot_native_en.yaml

---
 lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml
index 7d39dcc0..c1fa07f5 100644
--- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml
+++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_en.yaml
@@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[20+1]}}{% else %}{{answer_nu
 doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: 
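An aside on the `get-answer` filter in `cot_yaml` above: it extracts the final numeral from a chain-of-thought generation before `exact_match` is scored. A minimal illustration of that regex (the completion string is a hypothetical example; note that because `.` and `,` sit inside the character class, a trailing period is captured along with the digits):

```python
import re

# The pattern from cot_yaml, after YAML unescaping.
pattern = re.compile(r"The answer is (\-?[0-9\.\,]+)")

completion = "3 cars with 4 wheels each is 3 * 4 = 12. The answer is 12."
print(pattern.search(completion).group(1))  # -> "12." (trailing period included)
```

The `take_first` step then keeps only this first extracted match for scoring.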
"+question+"\nStep-by-Step Answer:"}}{% endif %}' include: cot_yaml -task: mgsm_en_direct +task: mgsm_en_native_cot -- GitLab From b2eea396aacc86a7eb5a7c44cd6a162b4e6ac6e3 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:07:18 +0700 Subject: [PATCH 203/264] Update mgsm_cot_native_de.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml index e84bcb45..ada27858 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_de.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[28+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' include: cot_yaml -task: mgsm_de_direct +task: mgsm_de_native_cot -- GitLab From 58bef0be2c5ed6ec827e3a9008eb8a6a99e6b4a2 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:04 +0700 Subject: [PATCH 204/264] Update mgsm_cot_native_es.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml index 0bd3b688..5a0ba219 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_es.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[22+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta paso a paso:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta paso a paso:"}}{% endif %}' include: cot_yaml -task: mgsm_es_direct +task: mgsm_es_native_cot -- GitLab From f7cff1ec233be789eeb7366d3db606a1cf02bf1d Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:11 +0700 Subject: [PATCH 205/264] Update mgsm_cot_native_fr.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml index 500cb05a..c1ce3ad1 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_fr.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[25+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not none %}{{question+"\nRéponse étape par étape :"}}{% else %}{{"Question : "+question+"\nRéponse étape par étape :"}}{% endif %}' include: cot_yaml -task: mgsm_fr_direct +task: mgsm_fr_native_cot -- GitLab From 7700d78ac55323d0cb50df8889aa70fe374f2293 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:19 +0700 Subject: [PATCH 206/264] Update mgsm_cot_native_ja.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml index 05830b22..f2866f54 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ja.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[10+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not 
none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: "+question+"\nステップごとの答え:"}}{% endif %}' include: cot_yaml -task: mgsm_ja_direct +task: mgsm_ja_native_cot -- GitLab From 5d5d53a7a8a597f51cef2952658b412b17196701 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:26 +0700 Subject: [PATCH 207/264] Update mgsm_cot_native_ru.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml index 0cbd994c..00b00b09 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_ru.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not none %}{{question+"\nПошаговоерешение:"}}{% else %}{{"Задача: "+question+"\nПошаговоерешение:"}}{% endif %}' include: cot_yaml -task: mgsm_ru_direct +task: mgsm_ru_native_cot -- GitLab From d52d78aadc6ae23c9da35f79abecac9e354d7fae Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:33 +0700 Subject: [PATCH 208/264] Update mgsm_cot_native_sw.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml index 4b47b2fe..693eea24 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_sw.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[24+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' include: cot_yaml -task: mgsm_sw_direct +task: mgsm_sw_native_cot -- GitLab From ae473f79e6a65f3b8d1bf2183c26f063cd5183fe Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:40 +0700 Subject: [PATCH 209/264] Update mgsm_cot_native_te.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml index 4c3b76a0..30cab550 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_te.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[18+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' include: cot_yaml -task: mgsm_te_direct +task: mgsm_te_native_cot -- GitLab From 0f2ff6afd6dcc6b15232e17b2917dcaba7f148e3 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:47 +0700 Subject: [PATCH 210/264] Update mgsm_cot_native_th.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml index 5311bcf8..1e570000 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_th.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[17+1]}}{% else %}{{answer_nu doc_to_text: '{% if answer is 
not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' include: cot_yaml -task: mgsm_th_direct +task: mgsm_th_native_cot -- GitLab From 4a967f26511f150c3488d854f57b63b8e7e309dd Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 17:08:55 +0700 Subject: [PATCH 211/264] Update mgsm_cot_native_zh.yaml --- lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml index db0d83b7..3e1be0b3 100644 --- a/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml +++ b/lm_eval/tasks/mgsm/native_cot/mgsm_cot_native_zh.yaml @@ -5,4 +5,4 @@ doc_to_target: '{% if answer is not none %}{{answer[5+1]}}{% else %}{{answer_num doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% endif %}' include: cot_yaml -task: mgsm_zh_direct +task: mgsm_zh_native_cot -- GitLab From f921fc19bf5029ba36732b9534ca8acf4b17fdc1 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 19 Sep 2023 10:10:50 +0000 Subject: [PATCH 212/264] fixed task and benchmark adding process --- lm_eval/tasks/__init__.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index c5749a1b..c6a78e9c 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -104,7 +104,7 @@ def get_task_name_from_config(task_config: Dict[str, str]) -> str: return "{dataset_path}".format(**task_config) -def include_task_folder(task_dir: str) -> None: +def include_task_folder(task_dir: str, register_task=True) -> None: """ Calling this function """ @@ -116,14 +116,15 @@ def include_task_folder(task_dir: str) -> None: try: config = utils.load_yaml_config(yaml_path) - # If a `task` in config is a list, - # that means it's a benchmark - if type(config["task"]) == list: - register_configurable_group(config) - else: + if register_task: all_configs = check_prompt_config(config) for config in all_configs: register_configurable_task(config) + else: + # If a `task` in config is a list, + # that means it's a benchmark + if type(config["task"]) == list: + register_configurable_group(config) except Exception as error: eval_logger.warning( @@ -136,6 +137,8 @@ def include_task_folder(task_dir: str) -> None: task_dir = os.path.dirname(os.path.abspath(__file__)) + "/" include_task_folder(task_dir) +# Register Benchmarks after all tasks have been added +include_task_folder(task_dir, register_task=False) def get_task(task_name, config): -- GitLab From a019eeae2ab337182f6959b2563d7a129336966c Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 19 Sep 2023 15:48:33 +0000 Subject: [PATCH 213/264] temporarily deactivate some Actions --- .github/workflows/new_tasks.yml | 136 +++++++++++++++---------------- .github/workflows/unit_tests.yml | 62 +++++++------- 2 files changed, 99 insertions(+), 99 deletions(-) diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml index ebb28a30..fb406988 100644 --- a/.github/workflows/new_tasks.yml +++ b/.github/workflows/new_tasks.yml @@ -1,72 +1,72 @@ -name: Tasks Modified +# name: Tasks Modified -on: - push: - branches: - - 'big-refactor*' - pull_request: - branches: - - 'big-refactor*' - workflow_dispatch: -# comment/edit out the above to stop/change the triggers -jobs: - changed_files: - runs-on: ubuntu-latest # windows-latest || 
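A note on PATCH 212 above: task registration becomes two-pass, so that a group's `task:` list only ever references names that were already registered. A sketch of the resulting import-time flow under the same convention as the diff (a list-valued `task` field marks a benchmark/group; registry internals elided):

```python
import os

# Pass 1: every concrete task yaml is registered (register_task defaults to True).
# Pass 2: only configs whose `task` field is a list (benchmarks/groups) are
# registered; by now every task name they reference already exists, e.g.
# squadv2_complete -> [squadv2, squadv2_noans_loglikelihood].
task_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
include_task_folder(task_dir)                       # tasks first
include_task_folder(task_dir, register_task=False)  # then groups/benchmarks
```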
macos-latest - timeout-minutes: 120 - name: Scan for changed tasks - steps: - - name: checkout - uses: actions/checkout@v3 - with: - fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. +# on: +# push: +# branches: +# - 'big-refactor*' +# pull_request: +# branches: +# - 'big-refactor*' +# workflow_dispatch: +# # comment/edit out the above to stop/change the triggers +# jobs: +# changed_files: +# runs-on: ubuntu-latest # windows-latest || macos-latest +# timeout-minutes: 120 +# name: Scan for changed tasks +# steps: +# - name: checkout +# uses: actions/checkout@v3 +# with: +# fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. - # Uses the tj-actions/changed-files@v37 action to check for changes. - # Outputs provided here: https://github.com/tj-actions/changed-files#outputs - # The `files_yaml` input optionally takes a yaml string to specify filters, - # and prepends the filter name to the standard output names. - - name: Check task folders - id: changed-tasks - uses: tj-actions/changed-files@v37.1.2 - with: - # tasks checks the tasks folder and api checks the api folder for changes - files_yaml: | - tasks: - - lm_eval/tasks/** - api: - - lm_eval/api/** - write_output_files: true +# # Uses the tj-actions/changed-files@v37 action to check for changes. +# # Outputs provided here: https://github.com/tj-actions/changed-files#outputs +# # The `files_yaml` input optionally takes a yaml string to specify filters, +# # and prepends the filter name to the standard output names. +# - name: Check task folders +# id: changed-tasks +# uses: tj-actions/changed-files@v37.1.2 +# with: +# # tasks checks the tasks folder and api checks the api folder for changes +# files_yaml: | +# tasks: +# - lm_eval/tasks/** +# api: +# - lm_eval/api/** +# write_output_files: true - # The next step is optional; the files are written to the workspace by default (above). - # so it's just for debugging - - name: Run Tests - if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' - run: | - echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV' - echo "One or more test file(s) has changed." - echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}" +# # The next step is optional; the files are written to the workspace by default (above). +# # so it's just for debugging +# - name: Run Tests +# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' +# run: | +# echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV' +# echo "One or more test file(s) has changed." 
+# echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}" - - name: Set up Python 3.9 - if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' - uses: actions/setup-python@v4 - with: - python-version: 3.9 - cache: 'pip' - cache-dependency-path: setup.py - - name: Install dependencies - if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' - run: | - python -m pip install --upgrade pip - pip install -e '.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu - # Install optional git dependencies - # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt - # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - name: Test with pytest - # if new tasks are added, run tests on them - if: steps.changed-tasks.outputs.tasks_any_modified == 'true' - run: python -m pytest tests/test_tasks.py -s -vv - # if api is modified, run tests on it - - name: Test more tasks with pytest - env: - API: true - if: steps.changed-tasks.outputs.api_any_modified == 'true' - run: python -m pytest tests/test_tasks.py -s -vv +# - name: Set up Python 3.9 +# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' +# uses: actions/setup-python@v4 +# with: +# python-version: 3.9 +# cache: 'pip' +# cache-dependency-path: setup.py +# - name: Install dependencies +# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' +# run: | +# python -m pip install --upgrade pip +# pip install -e '.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu +# # Install optional git dependencies +# # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt +# # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +# - name: Test with pytest +# # if new tasks are added, run tests on them +# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' +# run: python -m pytest tests/test_tasks.py -s -vv +# # if api is modified, run tests on it +# - name: Test more tasks with pytest +# env: +# API: true +# if: steps.changed-tasks.outputs.api_any_modified == 'true' +# run: python -m pytest tests/test_tasks.py -s -vv diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index be9f3fa9..13e9524c 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -44,35 +44,35 @@ jobs: # - name: Lint with mypy # run: mypy . 
--ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable # Job 2 - testcpu: - name: CPU Tests - runs-on: ubuntu-latest - strategy: - matrix: - python-version: [ "3.9", "3.10", "3.11" ] - timeout-minutes: 30 +# testcpu: +# name: CPU Tests +# runs-on: ubuntu-latest +# strategy: +# matrix: +# python-version: [ "3.9", "3.10", "3.11" ] +# timeout-minutes: 30 - steps: - - name: Checkout Code - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: pip - cache-dependency-path: setup.py - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -e '.[testing,anthropic,sentencepiece]' --extra-index-url https://download.pytorch.org/whl/cpu -# Install optional git dependencies -# pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt -# if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - name: Test with pytest - run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/tests_master --ignore=tests/extra - - name: Archive artifacts - uses: actions/upload-artifact@v3 - with: - name: output_results - path: | - test_logs/* +# steps: +# - name: Checkout Code +# uses: actions/checkout@v3 +# - name: Set up Python ${{ matrix.python-version }} +# uses: actions/setup-python@v4 +# with: +# python-version: ${{ matrix.python-version }} +# cache: pip +# cache-dependency-path: setup.py +# - name: Install dependencies +# run: | +# python -m pip install --upgrade pip +# pip install -e '.[testing,anthropic,sentencepiece]' --extra-index-url https://download.pytorch.org/whl/cpu +# # Install optional git dependencies +# # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt +# # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +# - name: Test with pytest +# run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/tests_master --ignore=tests/extra +# - name: Archive artifacts +# uses: actions/upload-artifact@v3 +# with: +# name: output_results +# path: | +# test_logs/* -- GitLab From d03974ad39a74ea375a074ce4cf65e0cba03c6f2 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Tue, 19 Sep 2023 22:55:54 +0700 Subject: [PATCH 214/264] Update main.py --- main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/main.py b/main.py index 36bfcd04..96207884 100644 --- a/main.py +++ b/main.py @@ -12,8 +12,6 @@ from lm_eval.api.registry import ALL_TASKS from lm_eval.logger import eval_logger, SPACING from lm_eval.tasks import include_task_folder -# from lm_eval.benchmarks import include_benchmarks - os.environ["TOKENIZERS_PARALLELISM"] = "false" -- GitLab From 877e9a6196be14148ab6195f00cb4094b901a3ec Mon Sep 17 00:00:00 2001 From: Chris <34248815+chrisociepa@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:09:09 +0200 Subject: [PATCH 215/264] Fix positional arguments in HF model generate --- lm_eval/models/huggingface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/models/huggingface.py b/lm_eval/models/huggingface.py index 8f3bd963..f39d3bff 100644 --- a/lm_eval/models/huggingface.py +++ b/lm_eval/models/huggingface.py @@ -508,7 +508,7 @@ class HFLM(LM): self.tokenizer, stop, 1, context.shape[0] ) return self.model.generate( - context, + input_ids=context, max_length=max_length, stopping_criteria=stopping_criteria, 
pad_token_id=self.eot_token_id, -- GitLab From 7418eddd9dca4bce8b4e553d6528ea0572777110 Mon Sep 17 00:00:00 2001 From: Lintang Sutawika Date: Thu, 21 Sep 2023 20:54:48 +0700 Subject: [PATCH 216/264] Create README.md --- lm_eval/tasks/asdiv/README.md | 56 +++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 lm_eval/tasks/asdiv/README.md diff --git a/lm_eval/tasks/asdiv/README.md b/lm_eval/tasks/asdiv/README.md new file mode 100644 index 00000000..e6e5aeec --- /dev/null +++ b/lm_eval/tasks/asdiv/README.md @@ -0,0 +1,56 @@ +# ASDiv + +### Paper + +Title: `ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers` + +Abstract: https://arxiv.org/abs/2106.15772 + +ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language +patterns and problem types) English math word problem (MWP) corpus for evaluating +the capability of various MWP solvers. Existing MWP corpora for studying AI progress +remain limited either in language usage patterns or in problem types. We thus present +a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem +types taught in elementary school. Each MWP is annotated with its problem type and grade +level (for indicating the level of difficulty). + +NOTE: We currently ignore formulas for answer generation. + +Homepage: https://github.com/chaochun/nlu-asdiv-dataset + + +### Citation + +``` +@misc{miao2021diverse, + title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers}, + author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su}, + year={2021}, + eprint={2106.15772}, + archivePrefix={arXiv}, + primaryClass={cs.AI} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `asdiv` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? -- GitLab From 76227f0dbec309c7124de09a9b7ce4009f8bb8f1 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 21 Sep 2023 14:07:58 +0000 Subject: [PATCH 217/264] add codexglue --- lm_eval/tasks/code_x_glue/code-text/bleu.py | 218 ++++++++++--------- lm_eval/tasks/code_x_glue/code-text/utils.py | 12 +- 2 files changed, 126 insertions(+), 104 deletions(-) diff --git a/lm_eval/tasks/code_x_glue/code-text/bleu.py b/lm_eval/tasks/code_x_glue/code-text/bleu.py index 50243474..aff16afe 100644 --- a/lm_eval/tasks/code_x_glue/code-text/bleu.py +++ b/lm_eval/tasks/code_x_glue/code-text/bleu.py @@ -1,12 +1,12 @@ #!/usr/bin/python -''' -This script was adapted from the original version by hieuhoang1972 which is part of MOSES. -''' +""" +This script was adapted from the original version by hieuhoang1972 which is part of MOSES. +""" # $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $ -'''Provides: +"""Provides: cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). 
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). @@ -15,7 +15,7 @@ score_cooked(alltest, n=4): Score a list of cooked test sentences. score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids. The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. -''' +""" import sys, math, re, xml.sax.saxutils import subprocess @@ -28,167 +28,188 @@ preserve_case = False eff_ref_len = "shortest" normalize1 = [ - ('', ''), # strip "skipped" tags - (r'-\n', ''), # strip end-of-line hyphenation and join lines - (r'\n', ' '), # join lines -# (r'(\d)\s+(?=\d)', r'\1'), # join digits + ("", ""), # strip "skipped" tags + (r"-\n", ""), # strip end-of-line hyphenation and join lines + (r"\n", " "), # join lines + # (r'(\d)\s+(?=\d)', r'\1'), # join digits ] normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] normalize2 = [ - (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing - (r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit - (r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit - (r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit + ( + r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", + r" \1 ", + ), # tokenize punctuation. apostrophe is missing + ( + r"([^0-9])([\.,])", + r"\1 \2 ", + ), # tokenize period and comma unless preceded by a digit + ( + r"([\.,])([^0-9])", + r" \1 \2", + ), # tokenize period and comma unless followed by a digit + (r"([0-9])(-)", r"\1 \2 "), # tokenize dash when preceded by a digit ] normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2] + def normalize(s): - '''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.''' + """Normalize and tokenize text. 
This is lifted from NIST mteval-v11a.pl.""" # Added to bypass NIST-style pre-processing of hyp and ref files -- wade - if (nonorm): + if nonorm: return s.split() if type(s) is not str: s = " ".join(s) # language-independent part: for (pattern, replace) in normalize1: s = re.sub(pattern, replace, s) - s = xml.sax.saxutils.unescape(s, {'"':'"'}) + s = xml.sax.saxutils.unescape(s, {""": '"'}) # language-dependent part (assuming Western languages): s = " %s " % s if not preserve_case: - s = s.lower() # this might not be identical to the original + s = s.lower() # this might not be identical to the original for (pattern, replace) in normalize2: s = re.sub(pattern, replace, s) return s.split() + def count_ngrams(words, n=4): counts = {} - for k in range(1,n+1): - for i in range(len(words)-k+1): - ngram = tuple(words[i:i+k]) - counts[ngram] = counts.get(ngram, 0)+1 + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i : i + k]) + counts[ngram] = counts.get(ngram, 0) + 1 return counts + def cook_refs(refs, n=4): - '''Takes a list of reference sentences for a single segment + """Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU - needs to know about them.''' - + needs to know about them.""" + refs = [normalize(ref) for ref in refs] maxcounts = {} for ref in refs: counts = count_ngrams(ref, n) - for (ngram,count) in counts.items(): - maxcounts[ngram] = max(maxcounts.get(ngram,0), count) + for (ngram, count) in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram, 0), count) return ([len(ref) for ref in refs], maxcounts) + def cook_test(test, item, n=4): - '''Takes a test sentence and returns an object that - encapsulates everything that BLEU needs to know about it.''' - (reflens, refmaxcounts)=item + """Takes a test sentence and returns an object that + encapsulates everything that BLEU needs to know about it.""" + (reflens, refmaxcounts) = item test = normalize(test) result = {} result["testlen"] = len(test) # Calculate effective reference sentence length. 
- + if eff_ref_len == "shortest": result["reflen"] = min(reflens) elif eff_ref_len == "average": - result["reflen"] = float(sum(reflens))/len(reflens) + result["reflen"] = float(sum(reflens)) / len(reflens) elif eff_ref_len == "closest": min_diff = None for reflen in reflens: - if min_diff is None or abs(reflen-len(test)) < min_diff: - min_diff = abs(reflen-len(test)) - result['reflen'] = reflen + if min_diff is None or abs(reflen - len(test)) < min_diff: + min_diff = abs(reflen - len(test)) + result["reflen"] = reflen - result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)] + result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)] - result['correct'] = [0]*n + result["correct"] = [0] * n counts = count_ngrams(test, n) for (ngram, count) in counts.items(): - result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count) + result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count) return result + def score_cooked(allcomps, n=4, ground=0, smooth=1): - totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n} + totalcomps = {"testlen": 0, "reflen": 0, "guess": [0] * n, "correct": [0] * n} for comps in allcomps: - for key in ['testlen','reflen']: + for key in ["testlen", "reflen"]: totalcomps[key] += comps[key] - for key in ['guess','correct']: + for key in ["guess", "correct"]: for k in range(n): totalcomps[key][k] += comps[key][k] logbleu = 0.0 all_bleus = [] for k in range(n): - correct = totalcomps['correct'][k] - guess = totalcomps['guess'][k] - addsmooth = 0 - if smooth == 1 and k > 0: - addsmooth = 1 - logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min) - if guess == 0: - all_bleus.append(-10000000) - else: - all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess )) + correct = totalcomps["correct"][k] + guess = totalcomps["guess"][k] + addsmooth = 0 + if smooth == 1 and k > 0: + addsmooth = 1 + logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log( + guess + addsmooth + sys.float_info.min + ) + if guess == 0: + all_bleus.append(-10000000) + else: + all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess)) logbleu /= float(n) all_bleus.insert(0, logbleu) - brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1)) + brevPenalty = min( + 0, 1 - float(totalcomps["reflen"] + 1) / (totalcomps["testlen"] + 1) + ) for i in range(len(all_bleus)): - if i ==0: - all_bleus[i] += brevPenalty - all_bleus[i] = math.exp(all_bleus[i]) + if i == 0: + all_bleus[i] += brevPenalty + all_bleus[i] = math.exp(all_bleus[i]) return all_bleus -def bleu(refs, candidate, ground=0, smooth=1): + +def bleu(refs, candidate, ground=0, smooth=1): refs = cook_refs(refs) test = cook_test(candidate, refs) return score_cooked([test], ground=ground, smooth=smooth) + def splitPuncts(line): - return ' '.join(re.findall(r"[\w]+|[^\s\w]", line)) + return " ".join(re.findall(r"[\w]+|[^\s\w]", line)) + def computeMaps(predictions, goldfile): - predictionMap = {} - goldMap = {} - gf = open(goldfile, 'r') - - for row in predictions: - cols = row.strip().split('\t') - if len(cols) == 1: - (rid, pred) = (cols[0], '') - else: - (rid, pred) = (cols[0], cols[1]) - predictionMap[rid] = [splitPuncts(pred.strip().lower())] - - for row in gf: - (rid, pred) = row.split('\t') - if rid in predictionMap: # Only insert if the id exists for the method - if rid not in goldMap: - goldMap[rid] = [] - goldMap[rid].append(splitPuncts(pred.strip().lower())) 
- - sys.stderr.write('Total: ' + str(len(goldMap)) + '\n') - return (goldMap, predictionMap) - - -#m1 is the reference map -#m2 is the prediction map + predictionMap = {} + goldMap = {} + gf = open(goldfile, "r") + + for row in predictions: + cols = row.strip().split("\t") + if len(cols) == 1: + (rid, pred) = (cols[0], "") + else: + (rid, pred) = (cols[0], cols[1]) + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for row in gf: + (rid, pred) = row.split("\t") + if rid in predictionMap: # Only insert if the id exists for the method + if rid not in goldMap: + goldMap[rid] = [] + goldMap[rid].append(splitPuncts(pred.strip().lower())) + + sys.stderr.write("Total: " + str(len(goldMap)) + "\n") + return (goldMap, predictionMap) + + +# m1 is the reference map +# m2 is the prediction map def bleuFromMaps(m1, m2): - score = [0] * 5 - num = 0.0 + score = [0] * 5 + num = 0.0 - for key in m1: - if key in m2: - bl = bleu(m1[key], m2[key][0]) - score = [ score[i] + bl[i] for i in range(0, len(bl))] - num += 1 - return [s * 100.0 / num for s in score] + for key in m1: + if key in m2: + bl = bleu(m1[key], m2[key][0]) + score = [score[i] + bl[i] for i in range(0, len(bl))] + num += 1 + return [s * 100.0 / num for s in score] def smoothed_bleu_4(references, predictions, **kwargs): @@ -197,17 +218,18 @@ def smoothed_bleu_4(references, predictions, **kwargs): goldMap = {} for rid, pred in enumerate(predictions): - predictionMap[rid] = [splitPuncts(pred.strip().lower())] + predictionMap[rid] = [splitPuncts(pred.strip().lower())] for rid, row in enumerate(references): - goldMap[rid] = [splitPuncts(row.strip().lower())] + goldMap[rid] = [splitPuncts(row.strip().lower())] return bleuFromMaps(goldMap, predictionMap)[0] -if __name__ == '__main__': - reference_file = sys.argv[1] - predictions = [] - for row in sys.stdin: - predictions.append(row) - (goldMap, predictionMap) = computeMaps(predictions, reference_file) - print (bleuFromMaps(goldMap, predictionMap)[0]) + +if __name__ == "__main__": + reference_file = sys.argv[1] + predictions = [] + for row in sys.stdin: + predictions.append(row) + (goldMap, predictionMap) = computeMaps(predictions, reference_file) + print(bleuFromMaps(goldMap, predictionMap)[0]) diff --git a/lm_eval/tasks/code_x_glue/code-text/utils.py b/lm_eval/tasks/code_x_glue/code-text/utils.py index 89cbbdf3..981a00b9 100644 --- a/lm_eval/tasks/code_x_glue/code-text/utils.py +++ b/lm_eval/tasks/code_x_glue/code-text/utils.py @@ -1,14 +1,14 @@ - def doc_to_text(doc): - inputs = ' '.join(doc['code_tokens']).replace('\n',' ') - inputs = ' '.join(inputs.strip().split()) + inputs = " ".join(doc["code_tokens"]).replace("\n", " ") + inputs = " ".join(inputs.strip().split()) return inputs + def doc_to_target(doc): - targets = ' '.join(doc['docstring_tokens']).replace('\n','') - targets = ' '.join(targets.strip().split()) + targets = " ".join(doc["docstring_tokens"]).replace("\n", "") + targets = " ".join(targets.strip().split()) - return targets \ No newline at end of file + return targets -- GitLab From f0d8b559a9fcca7f115bea8e5ed13996749aeba8 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 21 Sep 2023 14:08:21 +0000 Subject: [PATCH 218/264] update --- lm_eval/tasks/bbh/_generate_configs.py | 44 +++++++++---------- .../_flan_cot_fewshot_template_yaml | 2 +- .../_flan_cot_zeroshot_template_yaml | 2 +- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py index ae2fb38f..0c882af0 100644 --- 
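For reference, the `smoothed_bleu_4` entry point added in PATCH 217 scores corpus-level smoothed BLEU-4 on a 0-100 scale, pairing references and predictions by index. A usage sketch (it assumes the function is already in scope; the module lives under a dashed path, `code_x_glue/code-text/bleu.py`, so a direct `import` may need `importlib` in practice):

```python
references = ["returns the sum of two integers"]
predictions = ["return the sum of two numbers"]

# Tokenizes on punctuation via splitPuncts, lowercases, and returns the
# smoothed BLEU-4 score averaged over examples (0-100; higher is better).
score = smoothed_bleu_4(references, predictions)
print(round(score, 2))
```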
a/lm_eval/tasks/bbh/_generate_configs.py +++ b/lm_eval/tasks/bbh/_generate_configs.py @@ -13,18 +13,13 @@ from tqdm import tqdm from lm_eval import utils from lm_eval.logger import eval_logger + def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--base_yaml_path", required=True) - parser.add_argument( - "--save_prefix_path", default="flan_zeroshot" - ) - parser.add_argument( - "--cot", default=False - ) - parser.add_argument( - "--fewshot", default=False - ) + parser.add_argument("--save_prefix_path", default="flan_zeroshot") + parser.add_argument("--cot", default=False) + parser.add_argument("--fewshot", default=False) parser.add_argument("--task_prefix", default="") return parser.parse_args() @@ -44,7 +39,9 @@ if __name__ == "__main__": dataset_path = "lukaemon/bbh" for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()): - resp = requests.get(f"https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt").content.decode('utf-8') + resp = requests.get( + f"https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt" + ).content.decode("utf-8") prompt = resp.split("\n-----\n")[-1] description, *few_shot = prompt.split("\n\nQ:") @@ -54,13 +51,13 @@ if __name__ == "__main__": prefix_doc_to_text = " ".join(few_shot) else: for shot in few_shot: - shot = "Q:"+shot + shot = "Q:" + shot try: answer = answer_regex.search(shot)[0] except: print("task", task) print(shot) - example = shot.split("Let\'s think step by step.")[0] + example = shot.split("Let's think step by step.")[0] prefix_doc_to_text += f"{example}{answer}\n\n" doc_to_text = prefix_doc_to_text + base_doc_to_text @@ -68,17 +65,20 @@ if __name__ == "__main__": doc_to_text = doc_to_text + " Let's think step by step.\n" yaml_dict = { - "include": base_yaml_name, - "task": f"bbh_{args.task_prefix}_{task}", - "dataset_name": task, - "description": description+"\n\n", - "doc_to_text": doc_to_text, - } + "include": base_yaml_name, + "task": f"bbh_{args.task_prefix}_{task}", + "dataset_name": task, + "description": description + "\n\n", + "doc_to_text": doc_to_text, + } file_save_path = args.save_prefix_path + f"/{task}.yaml" eval_logger.info(f"Saving yaml for subset {task} to {file_save_path}") with open(file_save_path, "w") as yaml_file: - yaml.dump(yaml_dict, yaml_file, width=float("inf"), allow_unicode=True, default_style='"') - - - + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml index 680c2533..2e2e8bc9 100644 --- a/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_fewshot/_flan_cot_fewshot_template_yaml @@ -19,4 +19,4 @@ filter_list: filter: - function: "regex" regex_pattern: "(?<=the answer is )(.*)(?=.)" - - function: "take_first" \ No newline at end of file + - function: "take_first" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml index 66ab12e3..b6574a4e 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml @@ -19,4 +19,4 @@ filter_list: filter: - function: "regex" regex_pattern: "(?<=the answer is )(.*)(?=.)" - - function: "take_first" \ No newline at end of file + - function: 
"take_first" -- GitLab From ea8b5beb04a705f63401969bcfd3ffa02f216645 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 21 Sep 2023 14:08:39 +0000 Subject: [PATCH 219/264] update --- lm_eval/tasks/mmlu/_cot_prompts.json | 2 +- lm_eval/tasks/mmlu/_generate_configs.py | 21 ++++++++++++------- .../tasks/mmlu/default/_default_template_yaml | 2 +- .../_mmlu_flan_cot_fewshot_template_yaml | 2 +- .../_mmlu_flan_loglikelihood_template_yaml | 2 +- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/lm_eval/tasks/mmlu/_cot_prompts.json b/lm_eval/tasks/mmlu/_cot_prompts.json index 4714567a..fea25419 100644 --- a/lm_eval/tasks/mmlu/_cot_prompts.json +++ b/lm_eval/tasks/mmlu/_cot_prompts.json @@ -1 +1 @@ -{"abstract_algebra": "The following are multiple choice questions (with answers) about abstract algebra.\n\nQ: Statement 1 | Every element of a group generates a cyclic subgroup of the group. Statement 2 | The symmetric group S_10 has 10 elements.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. A cyclic group is a group that is generated by a single element. Hence a subgroup generated by a single element of a group is cyclic and Statement 1 is True. The answer is (C).\n\nQ: The symmetric group $S_n$ has $\nactorial{n}$ elements, hence it is not true that $S_{10}$ has 10 elements.\nFind the characteristic of the ring 2Z.\n(A) 0 (B) 3 (C) 12 (D) 30\nA: Let's think step by step. A characteristic of a ring is R is $n$ if the statement $ka = 0$ for all $a\\in 2Z$ implies that $k$ is a multiple of $n$. Assume that $ka = 0$ for all $a\\in 2Z$ for some $k$. In particular $2k = 0$. Hence $k=0$ and $n=0$. The answer is (A).\n\nQ: Statement 1| Every function from a finite set onto itself must be one to one. Statement 2 | Every subgroup of an abelian group is abelian.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. Statement 1 is true. Let $S$ be a finite set. If $f:S \nightarrow S$ is a onto function, then $|S| = |f(S)|$. If $f$ was not one to one, then for finite domain $S$ the image would have less than $S$ elements, a contradiction.\nStatement 2 is true. Let $G$ be an abelian group and $H$ be a subgroup of $G$. We need to show that $H$ is abelian. Let $a,b \\in H$. Then $a,b \\in G$ and $ab=ba$. Since $G$ is abelian, $ab=ba$. Since $H$ is a subgroup of $G$, $ab \\in H$. Therefore, $ab=ba$ and $H$ is abelian. The answer is (A).\n\nQ: Statement 1 | If aH is an element of a factor group, then |aH| divides |a|. Statement 2 | If H and K are subgroups of G then HK is a subgroup of G.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. Statement 2 is false. Let $H$ be a subgroup of $S_3$ generated by the cycle $(1,2)$ and $K$ be a subgroup of $S_3$ generated by the cycle $(1,3)$. Both $H$ and $K$ have two elements, the generators and the identity. However $HK$ contains cycles (1,2), (1,3) and (2,3,1), but the inverse of (2,3,1) is (2,1,3) and it does not belong to HK, hence HK is not a subgroup. The answer is (B).\n\nQ: Find all c in Z_3 such that Z_3[x]/(x^2 + c) is a field.\n(A) 0 (B) 1 (C) 2 (D) 3\nA: Let's think step by step. Z_3[x]/(x^2 + c) is a field if and only if x^2 + c does not have roots in Z_3. That is x^2 + c != 0 for every x in Z_3. If c = 0, then x^2 + c = x^2 has root 0. If c = 1 then x^2 + c = x^2 + 1 = 0 + 1 for x = 0, 1 + 1 = 2 for x = 1 and 1 + 1 = 2 for x = 2, hence x^2 + 1 does not have any roots. 
For c = 2 the polynomial x^2 + 2 has two roots at x = 1 and x = 2. Hence Z_3[x]/(x^2 + c) is a field if and only if c = 1. The answer is (B).", "anatomy": "The following are multiple choice questions (with answers) about anatomy.\n\nQ: Which of the following is the body cavity that contains the pituitary gland?\n(A) Abdominal (B) Cranial (C) Pleural (D) Spinal\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. The pituitary gland is the major endocrine gland attached to the base of the brain, and it is contained in the Cranial cavity. The answer is (B).\n\nQ: Which of these branches of the trigeminal nerve contain somatic motor processes?\n(A) The supraorbital nerve (B) The infraorbital nerve (C) The mental nerve (D) None of the above\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. \nWe know the following: (A) The supraorbital nerve (also known as the frontal nerve) is the largest branch of the ophthalmic nerve and branch of ophthalmic division of the trigeminal nerve. (B) The infraorbital nerve is a branch of the maxillary division of the trigeminal nerve. (C) The mental nerve is a branch of the mandibular division of the trigeminal nerve. Because all these nerves are purely sensory nerves and do not contain any somatic motor processes. Therefore, the answer should be none of the above, which is (D). The answer is (D).\n\nQ: In Angle's Class II Div 2 occlusion there is\n(A) excess overbite of the upper lateral incisors. (B) negative overjet of the upper central incisors. (C) excess overjet of the upper lateral incisors. (D) excess overjet of the upper central incisors.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. This is a question related to anatomy and orthodontics. Excess overjet is associated with Class II occlusions; therefore, we can safely eliminate (B) from the list, as negative overjet is often associated with Class III occlusions. Now, we need to determine the location of the excess overjet, and that would be the upper (maxillary) lateral incisors. Only (C) has the correct information. The answer is (C).\n\nQ: The pleura\n(A) have no sensory innervation. (B) are separated by a 2 mm space. (C) extend into the neck. (D) are composed of respiratory epithelium.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. First, recall that the pleura refers to the thin layer of tissue that covers the lungs and lines the interior wall of the chest cavity. Now, let\u2019s look at each option:\nOption (A): \u201cThe pleura have no sensory innervation.\u201d This information is not correct. The pleura do have a sensory innervation.\nOption (B): \u201cThe pleura are separated by a 2 mm space.\u201d This information is not correct. There is a very thin \u201cpotential\u201d space between the layers of the pleura; however, it is typically filled with serous pleural fluid. \nOption (C): \u201cThe pleura extend into the neck.\u201d This information is actuakky true. The cervical pleura, also known as the dome of the pleuradome of the pleura, lines the extendsiton of the pleural cavity into the neck.\nOption (D): \u201cThe pleura are composed of respiratory epithelium.\u201d This information is not correct. 
The pleaura are composed of connective tissue (CT).\nBecause (A), (B), and (D) are all incorrect, (D) is the only correct answer. The answer is (C).\n\nQ: What is the embryological origin of the hyoid bone?\n(A) The first pharyngeal arch (B) The first and second pharyngeal arches (C) The second pharyngeal arch (D) The second and third pharyngeal arches\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. The hyoid bone, which is also known as the hyooid, is a a small U-shaped bone located in the anterior neck. In its resting position, it lies between the ase of the mandible and the third cervical vertebrae. We know that the second and the third pharyngeal arches give rise to the horns of the hyoid bone; therefore, the embryological origin of the hyoid bone are the second and the third pharyngeal arches\u2014this information is covered in the last option (D). Therefore, we conclude that (D) must be the correct answer. The answer is (D).", "astronomy": "The following are multiple choice questions (with answers) about astronomy.\n\nQ: Where do most short-period comets come from and how do we know?\n(A) The Kuiper belt; short period comets tend to be in the plane of the solar system just like the Kuiper belt. (B) The Kuiper belt; short period comets tend to come from random directions indicating a spherical distribution of comets called the Kuiper belt. (C) The asteroid belt; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the asteroid belt. (D) The Oort cloud; short period comets tend to be in the plane of the solar system just like the Oort cloud.\nA: Let's think step by step. Most short-period comets come from the Kuiper belt, and we know because short period coments tend to be in the plane of the solar system, just like the Kuiper belt is. The answer is (A).\n\nQ: You are pushing a truck along a road. Would it be easier to accelerate this truck on Mars? Why? (Assume there is no friction)\n(A) It would be harder since the truck is heavier on Mars. (B) It would be easier since the truck is lighter on Mars. (C) It would be harder since the truck is lighter on Mars. (D) It would be the same no matter where you are.\nA: Let's think step by step. If we assume that there is no friction, the force needed to accelerate the truck is by Newton\u2019s second law only dependent on the mass of the truck. Hence (A), (B) and (C) are incorrect since it doesn\u2019t matter that it\u2019s on Mars, and (D) is the correct answer. The answer is (D).\n\nQ: Say the pupil of your eye has a diameter of 5 mm and you have a telescope with an aperture of 50 cm. How much more light can the telescope gather than your eye?\n(A) 10000 times more (B) 100 times more (C) 1000 times more (D) 10 times more\nA: Let's think step by step. The amount of light is proportional to the aperture area $A = \\pi D^2/4$ for a lens with diameter $D$, so the relative amounts of light between the eye with diameter 5mm and the telescope with diameter 50mm is $(50 cm)^2/(5mm)^2 = 10000$. The answer is (A).\n\nQ: Why isn't there a planet where the asteroid belt is located?\n(A) A planet once formed here but it was broken apart by a catastrophic collision. (B) There was not enough material in this part of the solar nebula to form a planet. (C) There was too much rocky material to form a terrestrial planet but not enough gaseous material to form a jovian planet. 
(D) Resonance with Jupiter prevented material from collecting together to form a planet.\nA: Let's think step by step. The asteroid belt is a stellar disc consisting of a large number of asteroids between Mars and Jupiter's orbits. The asteroids in this belt are affected by the gravitational pull from both other asteroids and nearby planets. Due to the strong gravitational force of Jupiter there are resonances that give rise to low density regions of asteroids known as the Kirkwood gap. So (B) and (C) are not correct since it\u2019s not a lack of material that prevents a planet from being formed, and (A) is incorrect because the Kirkwood gap would have prevented a planet from forming in the first place, and (D) is the correct option. The answer is (D).\n\nQ: Why is Mars red?\n(A) Because the surface is covered with heavily oxidized (\"rusted\") minerals. (B) Because the atmosphere scatters more light at bluer wavelengths transmitting mostly red light. (C) Because Mars is covered with ancient lava flows which are red in color. (D) Because flowing water on Mars's surface altered the surface minerals several billion years ago.\nA: Let's think step by step. Option (B) is not correct because if the red color was caused by the scattering off the atmosphere, then the earth with a much thicker atmosphere would also look red. Options (C) and (D) are not specific enough about why the color of the surface would be red, while (A) is correct because it explains that the surface is red due to the rusted materials on the surface and the red color comes from the rust. So the correct option is (A). The answer is (A).", "business_ethics": "The following are multiple choice questions (with answers) about business ethics.\n\nQ: In contrast to _______, _______ aim to reward favourable behaviour by companies. The success of such campaigns have been heightened through the use of ___________, which allow campaigns to facilitate the company in achieving _________ .\n(A) Buycotts, Boycotts, Blockchain technology, Charitable donations (B) Buycotts, Boycotts, Digital technology, Increased Sales (C) Boycotts, Buyalls, Blockchain technology, Charitable donations (D) Boycotts, Buycotts, Digital technology, Increased Sales\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cIn contrast to *boycotts*, *buycotts* aim to reward favourable behavior by companies. The success of such campaigns have been heightened through the use of *digital technology*, which allow campaigns to facilitate the company in achieving *increased sales*.\u201d The answer is (D).\n\nQ: _______ is the direct attempt to formally or informally manage ethical issues or problems, through specific policies, practices and programmes.\n(A) Corporate social responsibility (B) Business ethics management (C) Sustainability (D) Environmental management\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The direct attempt manage ethical issues through specific policies, practices, and programs is business ethics management. 
The answer is (B).\n\nQ: Three contrasting tactics that CSO's can engage in to meet their aims are ________ which typically involves research and communication, ________, which may involve physically attacking a company's operations or ________, often involving some form of _______.\n(A) Non-violent direct action, Violent direct action, Indirect action, Boycott (B) Indirect action, Instrumental action, Non-violent direct action, Information campaign (C) Indirect action, Violent direct action, Non-violent direct-action Boycott (D) Non-violent direct action, Instrumental action, Indirect action, Information campaign\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cThree contrasting tactics that CSO's can engage in to meet their aims are *indirect action*, which typically involves research and communication, *violent direct action*, which may involve physically attacking a company's operations or *non-violent direct action*, often involving some form of *boycott*.\u201d The answer is (C).\n\nQ: To ensure the independence of the non-executive board members, there are a number of steps which can be taken, which include non-executives being drawn from _______ the company, being appointed for a _________ time period as well as being appointed _________.\n(A) Outside, Limited, Independently (B) Inside, Limited, Intermittently (C) Outside, Unlimited, Intermittently (D) Inside, Unlimited, Independently\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cTo ensure the independence of the non-executive board members, there are a number of steps which can be taken, which include non-executives being draw from *outside* the company, being appointed for a *limited* time period as well as being imported *independently*. The answer is (A).\n\nQ: Beyond the business case for engaging in CSR there are a number of moral arguments relating to: negative _______, the _______that corporations possess and the ________ of business and society.\n(A) Externalities, Power, Independence (B) Publicity, Insubstantial resources, Mutual dependence (C) Publicity, Power, Independence (D) Externalities, Power, Mutual dependence\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cBeyond the business case for engaging the CSR there are a number of moral arguments relating to: negative *externalities*, the *power* that corporations possess and the *mutual independence* of business and society. The answer is (D).", "clinical_knowledge": "The following are multiple choice questions (with answers) about clinical knowledge.\n\nQ: Glycolysis is the name given to the pathway involving the conversion of:\n(A) glycogen to glucose-1-phosphate. (B) glycogen or glucose to fructose. (C) glycogen or glucose to pyruvate or lactate. (D) glycogen or glucose to pyruvate or acetyl CoA.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. Glycolysis is the name given to the pathway involving conversion of glycogen or glucose to pyruvate or lactate. The answer is (C).\n\nQ: What is the difference between a male and a female catheter?\n(A) Male and female catheters are different colours. (B) Male catheters are longer than female catheters. (C) Male catheters are bigger than female catheters. 
(D) Female catheters are longer than male catheters.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. The difference between a male and female catheter is that male catheters tend to be longer than female catheters. The answer is (B).\n\nQ: How many attempts should you make to cannulate a patient before passing the job on to a senior colleague, according to the medical knowledge of 2020?\n(A) 4 (B) 3 (C) 2 (D) 1\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. According to the medical protocol as of 2020, you should make two attempts to cannulate a patient before passing the job on to a more-senior practitioner. The answer is (C).\n\nQ: In the assessment of the hand function which of the following is true?\n(A) Abduction of the thumb is supplied by spinal root T2 (B) Opposition of the thumb by opponens policis is supplied by spinal root T1 (C) Finger adduction is supplied by the median nerve (D) Finger abduction is mediated by the palmar interossei\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. Of all the options, it is only true that the opposition of the thumb by opponens pollicis is supplied by spinal root T1. The answer is (B).\n\nQ: The energy for all forms of muscle contraction is provided by:\n(A) ATP. (B) ADP. (C) phosphocreatine. (D) oxidative phosphorylation.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. The energy for muscular contraction is provided by ATP (adenosine triphosphate), the cell\u2019s main energy currency. The answer is (A).", "college_biology": "The following are multiple choice questions (with answers) about college biology.\n\nQ: Which of the following represents an accurate statement concerning arthropods?\n(A) They possess an exoskeleton composed primarily of peptidoglycan. (B) They possess an open circulatory system with a dorsal heart. (C) They are members of a biologically unsuccessful phylum incapable of exploiting diverse habitats and nutrition sources. (D) They lack paired, jointed appendages.\nA: Let's think step by step. Peptidoglycan is known to comprise the cell wall of most bacteria, rather than the exoskeleton of arthropods, which is made of chitin, which rules out (A). The answer (C) is false because arthropods are a highly successful phylum. Likewise, arthropods have paired, jointed appendages, which rules out (D). The only remaining option is (B), as arthropods have an open circulatory system with a dorsal tubular heart. The answer is (B).\n\nQ: In a given population, 1 out of every 400 people has a cancer caused by a completely recessive allele, b. Assuming the population is in Hardy-Weinberg equilibrium, which of the following is the expected proportion of individuals who carry the b allele but are not expected to develop the cancer?\n(A) 1/400 (B) 19/400 (C) 20/400 (D) 38/400\nA: Let's think step by step. According to the Hardy Weinberg Law, $p^2 + 2 p q + q^2 = 1$, and $p + q = 1$ where $p$ is the frequency of the dominant allele, $q$ is the frequency of the recessive allele, and $p^2$, $q^2$, and $2pq$ are the frequencies of dominant homozygous, recessive homozygous, and heterozygous individuals, respectively. The frequency of the recessive allele (q) is $\\sqrt{\\frac{1}{400}} = 0.05$. We have $p = 1 - q = 0.95$. The frequency of heterozygous individuals is $2pq = 2 \\cdot 0.05 \\cdot 0.95 = 0.095$.
The number of heterozygous individuals is equal to the frequency of heterozygous individuals times the size of the population, or $0.095 * 400 = 38$. So we end up with 38/400. The answer is (D).\n\nQ: According to the pressure-flow model of movement of phloem contents, photosynthate movement from source to sink is driven by\n(A) an ATP-dependent pressure-flow pump (B) a water-pressure potential gradient (C) transpiration (D) apoplastic diffusion\nA: Let's think step by step. It is a gradient in water pressure that induces the movement of phloem content, which refers to answer (B). The mechanism of movement does not rely on metabolism, which rules out (A). Transpiration refers to the exhalation of water vapor through plant stomata, and is also not related, which rules out (C). While the apoplastic pathway is one of two main pathways for water transport in plants, it is not central to the pressure flow model, which rules out (D). The answer is (B).\n\nQ: Which of the following contain DNA sequences required for the segregation of chromosomes in mitosis and meiosis?\n(A) Telomeres (B) Centromeres (C) Nucleosomes (D) Spliceosomes\nA: Let's think step by step. Telomeres are protective caps at the ends of chromosomes and are not responsible for segregation, which rules out (A). Nucleosomes are the repeating subunit that comprises chromatin packed in a cell nucleus, and do not specifically refer to DNA sequences necessary for segregating chromosomes in cell division, which rules out (C). A spliceosome is a large ribonucleoprotein that removes introns from transcribed pre-mRNA rather than governing chromosome segregation. Centromeres are directly responsible for segregating chromosomes in cell division. The answer is (B).\n\nQ: The presence of homologous structures in two different organisms, such as the humerus in the front limb of a human and a bird, indicates that\n(A) the human and bird are polyphyletic species (B) a human's and bird's evolution is convergent (C) the human and bird belong to a clade (D) the human and bird developed by analogy\nA: Let's think step by step. Polyphyletic species are organisms that are grouped due to having similar characteristics but which do not have a common ancestor. This is not the case for humans and birds, which rules out (A). Convergent evolution refers to the independent development of similar features in different species at different periods, which is also not the case for humans and birds, which rules out (B). Analogy refers to the superficial resemblance of structures that have different origins, which is not the case for the human and bird forearms, which rules out (D). Humans and birds do belong to the same clade - a group of organisms descended from a common ancestor. The answer is (C).",
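As an aside for readers checking the Hardy-Weinberg exemplar above, the carrier count can be verified numerically. This is a minimal sketch, not part of the prompt data; the variable names are illustrative only.

```python
import math

# Hardy-Weinberg check: 1 in 400 people is homozygous recessive, so q^2 = 1/400.
q = math.sqrt(1 / 400)    # recessive allele frequency: 0.05
p = 1 - q                 # dominant allele frequency: 0.95
het = 2 * p * q           # heterozygous (carrier) frequency: 0.095
print(round(het * 400))   # expected carriers per 400 people -> 38
```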
"college_chemistry": "The following are multiple choice questions (with answers) about college chemistry.\n\nQ: 3 Cl\u2212(aq) + 4 CrO_4^2\u2212(aq) + 23 H+(aq) \u2192 3 HClO2(aq) + 4 Cr3+(aq) + 10 H2O(l). In the reaction shown above, Cl\u2212(aq) behaves as\n(A) an acid (B) a base (C) a catalyst (D) a reducing agent\nA: Let's think step by step. A molecule that behaves as a base accepts an H+ ion (or proton) from another molecule, whereas a molecule that behaves as an acid donates an H+ ion (or proton) to another molecule. Neither of these is the case for Cl in this reaction, which rules out (A) and (B). A catalyst is a substance that only accelerates a reaction without itself undergoing chemical change, which is not the case here. This rules out (C). Instead, the $Cl^{-}$ ions are oxidized, donating electrons in the reaction to form 3 HClO2. This is the behavior of a reducing agent, or (D). The answer is (D).\n\nQ: Which of the following statements about the lanthanide elements is NOT true?\n(A) The most common oxidation state for the lanthanide elements is +3. (B) Lanthanide complexes often have high coordination numbers (> 6). (C) All of the lanthanide elements react with aqueous acid to liberate hydrogen. (D) The atomic radii of the lanthanide elements increase across the period from La to Lu.\nA: Let's think step by step. The atomic radii of the lanthanide elements in fact decrease across the period from La to Lu. Options (A), (B), and (C) are all true. This means that only (D) is NOT true. The answer is (D).\n\nQ: Which of the following lists the hydrides of group-14 elements in order of thermal stability, from lowest to highest?\n(A) PbH4 < SnH4 < GeH4 < SiH4 < CH4 (B) PbH4 < SnH4 < CH4 < GeH4 < SiH4 (C) CH4 < SiH4 < GeH4 < SnH4 < PbH4 (D) CH4 < PbH4 < GeH4 < SnH4 < SiH4\nA: Let's think step by step. The thermal stability of group-14 hydrides decreases as we move from the top of group 14 to the bottom. The order of elements in the group from top to bottom is C, Si, Ge, Sn, Pb. Therefore in order of increasing thermal stability we have PbH4, SnH4, GeH4, SiH4, and CH4, or answer (A). The answer is (A).\n\nQ: Predict the number of lines in the EPR spectrum of a solution of 13C-labelled methyl radical (13CH3\u2022), assuming the lines do not overlap.\n(A) 4 (B) 3 (C) 6 (D) 24 (E) 8\nA: Let's think step by step. The electron paramagnetic resonance spectrum will be split by two forms of interactions. The first is the hyperfine interaction with the 13C (nuclear spin $I = \\frac{1}{2}$) which will split the spectrum into 2 lines. This will be further split into 4 lines by the interaction with three equivalent 1H nuclei. The total number of lines is therefore $2 \\cdot 4 = 8$. The answer is (E).",
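The EPR line count in the college_chemistry exemplar above follows the standard (2nI + 1) multiplicity rule; a quick sketch to verify it (illustrative only, not part of the prompt data):

```python
# Each set of n equivalent spin-I nuclei multiplies the line count by (2*n*I + 1).
def multiplicity(n, spin):
    return int(2 * n * spin + 1)

# 13C (I = 1/2) doubles the line count; three equivalent 1H (I = 1/2) give 4 lines.
print(multiplicity(1, 0.5) * multiplicity(3, 0.5))  # -> 8
```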
"college_computer_science": "The following are multiple choice questions (with answers) about college computer science.\n\nQ: Which of the following regular expressions is equivalent to (describes the same set of strings as) (a* + b)*(c + d)?\n(A) a*(c + d)+ b(c + d)\n(B) a*(c + d)* + b(c + d)*\n(C) a*(c + d)+ b*(c + d)\n(D) (a + b)*c +(a + b)*d\nA: Let's think step by step. We know that:\n1. (X* + Y)* = (X + Y)*\n2. X(Y + Z) = XY + XZ\nUsing equation 1 we can rewrite (a* + b)*(c + d) as:\n3. (a + b)*(c + d)\nUsing equation 2 we can rewrite equation 3 as:\n(a + b)*c + (a + b)*d The answer is (D).\n\nQ: The Singleton design pattern is used to guarantee that only a single instance of a class may be instantiated. Which of the following is (are) true of this design pattern?\nI. The Singleton class has a static factory method to provide its instance.\nII. The Singleton class can be a subclass of another class.\nIII. The Singleton class has a private constructor.\n(A) I only\n(B) II only\n(C) III only\n(D) I, II, and III\nA: Let's think step by step. Statement I is a correct statement about a Singleton, because a Singleton provides its single instance via a static factory method. Statement II is also correct, because there is no inherent restriction regarding the inheritance of a Singleton. Statement III is also correct, because a Singleton must be instantiated only once, so its constructor is made private to prevent any construction except via its static factory method.\nGiven these facts, statements I, II, and III are all correct. The answer is (D).\n\nQ: A certain pipelined RISC machine has 8 general-purpose registers R0, R1, . . . , R7 and supports the following operations:\nADD Rs1, Rs2, Rd (Add Rs1 to Rs2 and put the sum in Rd)\nMUL Rs1, Rs2, Rd (Multiply Rs1 by Rs2 and put the product in Rd)\nAn operation normally takes one cycle; however, an operation takes two cycles if it produces a result required by the immediately following operation in an operation sequence.\nConsider the expression AB + ABC + BC, where variables A, B, C are located in registers R0, R1, R2. If the contents of these three registers must not be modified, what is the minimum number of clock cycles required for an operation sequence that computes the value of AB + ABC + BC?\n(A) 5 (B) 6 (C) 7 (D) 8\nA: Let's think step by step. First, we are given that A is in R0, B is in R1, and C is in R2.\nNext, we can see that we must compute three multiplies (AB, BC, and ABC) and two adds (AB + ABC, (AB + ABC) + BC) to compute our final answer, resulting in a minimum of five clock cycles.\nNext, we can see that there is no way to avoid at least one pipeline stall when computing our final answer, because to compute our final sum we must wait at least one cycle for the results from the previous stage to be ready. Thus, our minimum number of cycles must be 6.\nWe can verify that we can create a solution that requires only six cycles as follows:\ncompute AB: MUL R0, R1, R3\ncompute BC: MUL R1, R2, R4\ncompute ABC: MUL R3, R2, R5\ncompute AB + BC: ADD R3, R4, R6\nSTALL\ncompute AB + ABC + BC: ADD R5, R6, R7\nSo there are 6 cycles. The answer is (B).\n\nQ: A compiler generates code for the following assignment statement.\nG := (A + B) * C - (D + E) * F\nThe target machine has a single accumulator and a single-address instruction set consisting of instructions load, store, add, subtract, and multiply. For the arithmetic operations, the left operand is taken from the accumulator and the result appears in the accumulator. The smallest possible number of instructions in the resulting code is\n(A) 5 (B) 6 (C) 7 (D) 9\nA: Let's think step by step. We can compute the final answer with the following sequence of operations:\n1. LOAD D (accumulator = D)\n2. ADD E (accumulator = D+E)\n3. MUL F (accumulator = (D+E)*F)\n4. STORE X (X = (D+E)*F)\n5. LOAD A (accumulator = A)\n6. ADD B (accumulator = A+B)\n7. MUL C (accumulator = (A+B)*C)\n8. SUB X (accumulator = (A+B)*C - (D+E)*F)\n9. STORE G (G = (A+B)*C - (D+E)*F)\nThis sequence takes 9 instructions. The answer is (D).\n\nQ: Consider a computer design in which multiple processors, each with a private cache memory, share global memory using a single bus. This bus is the critical system resource. Each processor can execute one instruction every 500 nanoseconds as long as memory references are satisfied by its local cache. When a cache miss occurs, the processor is delayed for an additional 2,000 nanoseconds. During half of this additional delay, the bus is dedicated to serving the cache miss. During the other half, the processor cannot continue, but the bus is free to service requests from other processors. On average, each instruction requires 2 memory references. On average, cache misses occur on 1 percent of references. What proportion of the capacity of the bus would a single processor consume, ignoring delays due to competition from other processors?\n(A) 1/50 (B) 1/27 (C) 1/25 (D) 2/27\nA: Let's think step by step.
We know that each instruction requires two memory references, and that there is an average cache miss rate of one percent.\nThus a given processor has:\n(1 cache miss / 100 references) * (2 references / instruction) =\n(2 cache misses / 100 instructions), so:\nmisses_per_instruction = 1 cache miss / 50 instructions.\nNext, we know that each instruction requires 500 nanoseconds when there is no cache miss, and 500 + 2000 = 2500 nanoseconds when there is a cache miss. Thus:\n50 instructions take (49 * 500) + (1 * 2500) = 27000 nanoseconds, so:\ninstructions_per_ns = 50 instructions / 27000 nanoseconds.\nNow, we know that each cache miss locks the bus for half of the 2000 nanosecond cache miss delay, or 1000 nanoseconds, so:\nlock_ns_per_miss = 1000 nanoseconds / cache miss.\nThus we can see that on average a single processor will lock the bus for:\nlock_ns_per_miss * misses_per_instruction * instructions_per_ns =\n(1000 nanoseconds / cache miss) * (1 cache miss / 50 instructions) * (50 instructions / 27000 nanoseconds) = 1000 * (1/50) * (50/27000) = 1000/27000 = 1/27. The answer is (B).",
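The bus-utilization arithmetic in the exemplar above reduces to a short calculation; a minimal sketch for verification (names are illustrative, not part of the prompt data):

```python
from fractions import Fraction

# 2 references/instruction at a 1% miss rate -> 1 cache miss per 50 instructions.
# Those 50 instructions take 49 * 500 + 1 * 2500 = 27000 ns in total,
# and the single miss holds the bus for 1000 ns.
block_ns = 49 * 500 + 1 * 2500
bus_ns = 1000
print(Fraction(bus_ns, block_ns))  # -> 1/27
```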
"college_mathematics": "The following are multiple choice questions (with answers) about college mathematics.\n\nQ: Let V be the set of all real polynomials p(x). Let transformations T, S be defined on V by T:p(x) -> xp(x) and S:p(x) -> p'(x) = d/dx p(x), and interpret (ST)(p(x)) as S(T(p(x))). Which of the following is true?\n(A) ST = 0 (B) ST = T (C) ST = TS (D) ST - TS is the identity map of V onto itself.\nA: Let's think step by step. For a given polynomial $p$ we have\n\\[ST(p) = (xp(x))\u2019 = p(x) + xp\u2019(x)\\]\nand\n\\[TS(p) = xp\u2019(x).\\]\nHence \\[ST(p) - TS(p) = p(x) + xp\u2019(x) - xp\u2019(x) = p(x),\\] which is the identity map. The answer is (D).\n\nQ: Suppose that f(1 + x) = f(x) for all real x. If f is a polynomial and f(5) = 11, then f(15/2)\n(A) -11 (B) 0 (C) 11 (D) 33/2\nA: Let's think step by step. The only polynomial such that $f(1 + x) = f(x)$ is a constant polynomial. Hence $f(5) = 11 = f(15/2)$. The answer is (C).\n\nQ: Let A be a real 2x2 matrix. Which of the following statements must be true?\nI. All of the entries of A^2 are nonnegative.\nII. The determinant of A^2 is nonnegative.\nIII. If A has two distinct eigenvalues, then A^2 has two distinct eigenvalues.\n(A) I only (B) II only (C) III only (D) II and III only\nA: Let's think step by step. We have \\[ det(A^2) = (det(A))^2 \\geq 0,\\] hence II holds.\nIII is false: as a counterexample take a diagonal matrix with -1 and 1 on the diagonal. Then $A^2$ is the identity matrix. The answer is (B).\n\nQ: Let A be the set of all ordered pairs of integers (m, n) such that 7m + 12n = 22. What is the greatest negative number in the set B = {m + n : (m, n) \\in A}?\n(A) -5 (B) -4 (C) -3 (D) -2\nA: Let's think step by step. We have 12n = 22 - 7m and one of the solutions is $m = -2$, $n = 3$. Then $m + n = 1$, hence we need to look for smaller $m$ in order to make $m + n$ negative. The next solution is $m = -14$ and $n = 10$. For smaller $m$ we have $m + n$ smaller than $-4$. The answer is (B).\n\nQ: A tank initially contains a salt solution of 3 grams of salt dissolved in 100 liters of water. A salt solution containing 0.02 grams of salt per liter of water is sprayed into the tank at a rate of 4 liters per minute. The sprayed solution is continually mixed with the salt solution in the tank, and the mixture flows out of the tank at a rate of 4 liters per minute. If the mixing is instantaneous, how many grams of salt are in the tank after 100 minutes have elapsed?\n(A) 2 (B) 2 - e^-2 (C) 2 + e^-2 (D) 2 + e^-4\nA: Let's think step by step. For all $t \\in \\mathbb{R}$, let $s(t)$ denote the number of grams of salt in the tank at the $t$ minute mark. Then $s(0) = 3$.\nWe use $s$ and $s(t)$ interchangeably. We also use $s^{\\prime}$ and $s^{\\prime}(t)$ interchangeably. The solution sprayed into the tank adds $(0.02) 4=2 / 25$ grams of salt per minute. There are always 100 liters of liquid in the tank, containing $s$ grams of salt. So the density of salt in the tank is $s / 100$ grams per liter. The flow of water out of the tank therefore subtracts $4(s / 100)=s / 25$ grams of salt per minute. Then, for all $t \\in \\mathbb{R}$, we have $s^{\\prime}(t)=(2 / 25)-(s / 25)=(2-s) / 25$, and so $[s(t)=2] \\Rightarrow\\left[s^{\\prime}(t)=0\\right]$. For all $t \\in \\mathbb{R}$,\n$$\n\\frac{d}{d t}[\\ln (s-2)]=\\frac{s^{\\prime}}{s-2}=\\frac{-1}{25}=\\frac{d}{d t}\\left[-\\frac{t}{25}\\right] .\n$$\nChoose $C \\in \\mathbb{R}$ such that, for all $t \\in \\mathbb{R}, \\ln ((s(t)-2))=-[t / 25]+C$. Let $K:=e^{C}$. Then, for all $t \\in \\mathbb{R}$, we have $(s(t))-2=K e^{-t / 25}$, and so $s(t)=2+K e^{-t / 25}$. Then $3=s(0)=2+K e^{0}=2+K$, so $K=1$. Then $s(100)=2+K e^{-100 / 25}=2+1 \\cdot e^{-4}=2+e^{-4}$. The answer is (D).",
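The mixing-tank result above, s(100) = 2 + e^-4, can be cross-checked by integrating s'(t) = (2 - s)/25 numerically; a rough forward-Euler sketch (illustrative only, not part of the prompt data):

```python
import math

s, t, dt = 3.0, 0.0, 0.001   # s(0) = 3 grams of salt
while t < 100:
    s += (2 - s) / 25 * dt   # forward Euler step for s'(t) = (2 - s)/25
    t += dt
print(s, 2 + math.exp(-4))   # both approximately 2.0183
```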
"college_medicine": "The following are multiple choice questions (with answers) about college medicine.\n\nQ: An expected side effect of creatine supplementation is:\n(A) muscle weakness. (B) gain in body mass. (C) muscle cramps. (D) loss of electrolytes.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Creatine is a dietary supplement whose use typically results in a gain in body mass. The answer is (B).\n\nQ: Which of the following is not a true statement?\n(A) Muscle glycogen is broken down enzymatically to glucose-1-phosphate (B) Elite endurance runners have a high proportion of Type I fibres in their leg muscles (C) Liver glycogen is important in the maintenance of the blood glucose concentration (D) Insulin promotes glucose uptake by all tissues in the body\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Let\u2019s solve this step by step and go over each choice: \n(A) \u201cMuscle glycogen is broken down enzymatically to glucose-1-phosphate\u201d: This is a correct statement.\n(B) \u201cElite endurance runners have a high proportion of Type I fibres in their leg muscles\u201d: This is a correct statement.\n(C) \u201cLiver glycogen is important in the maintenance of the blood glucose concentration\u201d: This is a correct statement. \n(D) \u201cInsulin promotes glucose uptake by all tissues in the body\u201d: This is not a correct statement, because insulin promotes glucose uptake by the liver, adipose tissue, and muscle, but not all tissues. For instance, the tissues in the brain and red blood cells are not affected by insulin. The answer is (D).\n\nQ: A high school science teacher fills a 1 liter bottle with pure nitrogen and seals the lid. The pressure is 1.70 atm, and the room temperature is 25\u00b0C. Which two variables will both increase the pressure of the system, if all other variables are held constant?\n(A) Increasing temperature, increasing moles of gas (B) Increasing temperature, increasing volume (C) Decreasing volume, decreasing temperature (D) Decreasing moles of gas, increasing volume\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The relevant equation for this is the ideal gas law: PV=nRT. To increase the pressure of the system (P), either n (the number of moles of the gas) or T (the temperature) has to increase. The answer is (A).\n\nQ: In a genetic test of a newborn, a rare genetic disorder is found that has X-linked recessive transmission. Which of the following statements is likely true regarding the pedigree of this disorder?\n(A) All descendants on the maternal side will have the disorder. (B) Females will be approximately twice as affected as males in this family. (C) All daughters of an affected male will be affected. (D) There will be equal distribution of males and females affected.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Let\u2019s solve this step by step. Let's recall first that females have two X chromosomes, while males have one X and one Y chromosome. This is an important fact we need to know before answering this question. \nBecause a male can only pass his one X chromosome to a daughter, if he is affected by this rare genetic disorder, then we know for sure that he will pass this rare genetic disorder to all of his daughters. Therefore, \u201c(C): All daughters of an affected male will be affected\u201d is a correct statement. The answer is (C).\n\nQ: Glucose is transported into the muscle cell:\n(A) via protein transporters called GLUT4. (B) only in the presence of insulin. (C) via hexokinase. (D) via monocarbylic acid transporters.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Glucose (also known as the blood sugar) is the main sugar found in the human body. It is transported into the muscle cell via facilitated diffusion through protein transporters called GLUT4. The answer is (A).", "college_physics": "The following are multiple choice questions (with answers) about college physics.\n\nQ: A refracting telescope consists of two converging lenses separated by 100 cm. The eye-piece lens has a focal length of 20 cm. The angular magnification of the telescope is\n(A) 4 (B) 5 (C) 6 (D) 20\nA: Let's think step by step. In a refracting telescope, if both lenses are converging, the focus of both lenses must be between the two lenses, and thus the focal lengths of the two lenses must add up to their separation. Since the focal length of one lens is 20 cm, the focal length of the other must be 80 cm. The magnification is the ratio of these two focal lengths, or 4. The answer is (A).\n\nQ: The muon decays with a characteristic lifetime of about 10^-6 second into an electron, a muon neutrino, and an electron antineutrino. The muon is forbidden from decaying into an electron and just a single neutrino by the law of conservation of\n(A) charge (B) mass (C) energy and momentum (D) lepton number\nA: Let's think step by step. Lepton number must be conserved, meaning the total number of leptons minus the number of antileptons must remain constant. If a muon decays into an electron and a single neutrino, the total lepton number would go from one to two, violating lepton number conservation. The answer is (D).\n\nQ: One end of a Nichrome wire of length 2L and cross-sectional area A is attached to an end of another Nichrome wire of length L and cross-sectional area 2A.
If the free end of the longer wire is at an electric potential of 8.0 volts, and the free end of the shorter wire is at an electric potential of 1.0 volt, the potential at the junction of the two wires is most nearly equal to\n(A) 2.4 V (B) 3.3 V (C) 4.5 V (D) 5.7 V\nA: Let's think step by step. This is a simple voltage divider problem, where the longer wire has a resistance four times that of the shorter wire, since resistance scales as length divided by cross-sectional area. So the voltage divider ratio is 1 / 5, meaning that the potential at the junction is 1.0 V + (8.0 V - 1.0 V) * 1/5 = 2.4 V. The answer is (A).\n\nQ: For which of the following thermodynamic processes is the increase in the internal energy of an ideal gas equal to the heat added to the gas?\n(A) Constant temperature (B) Constant volume (C) Constant pressure (D) Adiabatic\nA: Let's think step by step. Heat added to the gas can go into the gas\u2019s internal energy or work done against an external force. However, if the volume of the gas container is constant, no work will be done (since work is pressure times change in volume). So, at constant volume, all of the heat goes into the internal energy. The answer is (B).",
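The Nichrome-wire exemplar above is a straightforward voltage divider; a small sketch verifying the 2.4 V junction potential (illustrative names, not part of the prompt data):

```python
# R is proportional to L/A: long wire (2L, A) vs short wire (L, 2A) -> 4:1 ratio.
R_long, R_short = 4.0, 1.0
V_high, V_low = 8.0, 1.0
# The junction sits R_short / (R_long + R_short) of the way up from the low end.
print(V_low + (V_high - V_low) * R_short / (R_long + R_short))  # -> 2.4
```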
"computer_security": "The following are multiple choice questions (with answers) about computer security.\n\nQ: SHA-1 has a message digest of\n(A) 160 bits (B) 512 bits (C) 628 bits (D) 820 bits\nA: Let's think step by step. Since SHA-1 is a hash function which takes an input and produces a 160-bit (20-byte) hash value, its message digest is 160 bits. The answer is (A).\n\nQ: _____________ can modify data on your system \u2013 so that your system doesn\u2019t run correctly or you can no longer access specific data, or it may even ask for ransom in order to give your access.\n(A) IM \u2013 Trojans (B) Backdoor Trojans (C) Trojan-Downloader (D) Ransom Trojan\nA: Let's think step by step. The question describes malware that modifies data and demands a ransom to restore access, which means a ransom trojan. The answer is (D).\n\nQ: What is ethical hacking?\n(A) \"Hacking\" ethics so they justify unintended selfish behavior (B) Hacking systems (e.g., during penetration testing) to expose vulnerabilities so they can be fixed, rather than exploited (C) Hacking into systems run by those whose ethics you disagree with (D) A slang term for rapid software development, e.g., as part of hackathons\nA: Let's think step by step. Ethical hacking is a process of detecting vulnerabilities in an application, system, or organization's infrastructure that an attacker can use to exploit an individual or organization. They use this process to prevent cyberattacks and security breaches by lawfully hacking into the systems and looking for weak points. The answer is (B).\n\nQ: The ____________ is anything which your search engine cannot search.\n(A) Haunted web (B) World Wide Web (C) Surface web (D) Deep Web\nA: Let's think step by step. A search engine searches the Surface Web, the publicly indexable portion of the World Wide Web, so (B) and (C) are wrong. The Haunted Web doesn\u2019t correspond to an internet concept. The Deep Web is the part of the World Wide Web which is not indexed. The answer is (D).\n\nQ: Exploitation of the Heartbleed bug permits\n(A) overwriting cryptographic keys in memory (B) a kind of code injection (C) a read outside bounds of a buffer (D) a format string attack\nA: Let's think step by step. The Heartbleed Bug is a serious vulnerability in the popular OpenSSL cryptographic software library. Heartbleed resulted from improper input validation (due to a missing bounds check) in the implementation of the TLS heartbeat extension. The vulnerability was classified as a buffer over-read, a situation where more data can be read than should be allowed. The answer is (C).", "conceptual_physics": "\nThe following are multiple choice questions (with answers) about conceptual physics.\n\nQ: Colors in a soap bubble result from light\n(A) converted to a different frequency (B) deflection (C) interference (D) polarization\nA: Let's think step by step. In a soap bubble film, the light bounces between the two soap-air interfaces many times, interfering with itself constructively or destructively depending on the width of the film. This results in different colors being visible. The answer is (C).\n\nQ: Compared with the mass of a uranium atom undergoing fission, the combined masses of the products after fission are\n(A) less (B) more (C) the same (D) zero\nA: Let's think step by step. Fission releases energy, which comes from the rest mass of its initial nucleus. Thus the mass of the products is less than the mass of the reactant uranium nucleus. The answer is (A).\n\nQ: Things that are equivalent according to the equivalence principle are\n(A) space and time. (B) a traveling twin and a stay-at-home twin. (C) gravity and acceleration. (D) mass and energy.\nA: Let's think step by step. Einstein\u2019s famous equivalence principle states that gravity and acceleration are equivalent. The answer is (C).\n\nQ: Which of these three elements has the most mass per nucleon?\n(A) Hydrogen (B) Iron (C) Uranium (D) Same in each\nA: Let's think step by step. Due to nuclear binding energy, the mass of an atomic nucleus is less than the sum of individual masses of the free constituent protons and neutrons; this is known as the mass defect. Hydrogen has no mass defect because it has only a single nucleon, so it will have the most mass per nucleon. The answer is (A).\n\nQ: A model airplane flies slower when flying into the wind and faster with wind at its back. When launched at right angles to the wind, in a cross wind, its groundspeed compared with flying in still air is\n(A) the same (B) greater (C) less (D) either greater or less depending on wind speed\nA: Let's think step by step. The plane\u2019s speed in the direction of the wind is greater than it would be in the absence of wind, and its speed orthogonal to the wind is the same as it would be in the absence of the wind. The total speed, which is these two components added in quadrature, is thus greater than the speed in still air. The answer is (B).", "econometrics": "The following are multiple choice questions (with answers) about econometrics.\n\nQ: Suppose now that a researcher wishes to use information criteria to determine the optimal lag length for a VAR.
500 observations are available for the bi-variate VAR, and the values of the determinant of the variance-covariance matrix of residuals are 0.0336, 0.0169, 0.0084, and 0.0062 for 1, 2, 3, and 4 lags respectively. What is the optimal model order according to Akaike's information criterion?\n(A) 1 lag (B) 2 lags (C) 3 lags (D) 4 lags\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s solve this problem step by step. First of all, let\u2019s recall that for a given set of data, Akaike's information criterion (AIC) allows us to measure how well a statistical model fits the data; it is an estimator of prediction error. Here in this problem we will need to use the formula ln(det(sigma_hat)) + (2 * k / T) to determine the values of Akaike\u2019s criterion, where ln denotes the natural log function, det the determinant function, k the total number of parameters (across both equations), and T the number of observations (which, in this case, is equal to 500). For 1 lag, the number of parameters in total is equal to 6; for 2 lags, it is 10; for 3 lags, it is 14; and for 4 lags, it is 18. Now, let\u2019s calculate the values of the criterion for each lag:\n(A) 1 lag: ln(0.0336) + (2 * 6 / 500) = ln(0.0336) + (12 / 500) = -3.369\n(B) 2 lags: ln(0.0169) + (2 * 10 / 500) = ln(0.0169) + (20 / 500) = -4.040\n(C) 3 lags: ln(0.0084) + (2 * 14 / 500) = ln(0.0084) + (28 / 500) = -4.724\n(D) 4 lags: ln(0.0062) + (2 * 18 / 500) = ln(0.0062) + (36 / 500) = -5.011\nBecause the optimal model order according to AIC minimizes the information criterion, the answer should be the one with the lowest value. In this case, (D) has the lowest value. The answer is (D).\n\nQ: Consider the following AR(1) model with the disturbances having zero mean and unit variance\nyt = 0.2 + 0.4 yt-1 + ut\nThe (unconditional) mean of y will be given by\n(A) 0.2 (B) 0.4 (C) 0.5 (D) 0.33\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s solve this problem step by step. If we have an AR(1) model with the disturbances having zero mean and unit variance, then the unconditional mean of y is equal to the following:\nunconditional mean of y = (the intercept term) / (1 - autoregressive coefficient)\nWe know that the intercept term is 0.2 and the autoregressive coefficient is 0.4; thus, we have:\nunconditional mean of y = (0.2) / (1 - 0.4) = (0.2) / (0.6) = 2 / 6 = 1 / 3, which is approximately 0.33. That means that the answer should be (D) 0.33. The answer is (D).\n\nQ: What would be the consequences for the OLS estimator if heteroscedasticity is present in a regression model but ignored?\n(A) It will be biased (B) It will be inconsistent (C) It will be inefficient (D) All of (a), (b) and (c) will be true.\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Heteroscedasticity refers to the condition where the variance of the error terms is not constant across multiple observations. If heteroscedasticity is present in a regression model but ignored, then the OLS coefficient estimates will remain unbiased and consistent, but they will be inefficient. Because (A) and (B) are incorrect choices and (C) is a correct choice, (D) cannot be the right answer. Ultimately, (C) is the only true choice. The answer is (C).
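The AIC values in the econometrics exemplar above can be recomputed directly; a minimal sketch assuming the parameter counts stated there (k = 2 + 4p total parameters for p lags, T = 500), illustrative only and not part of the prompt data:

```python
import math

T = 500
dets = {1: 0.0336, 2: 0.0169, 3: 0.0084, 4: 0.0062}
for p, det in dets.items():
    k = 2 + 4 * p                    # total parameters across both equations
    aic = math.log(det) + 2 * k / T  # AIC = ln(det(Sigma_hat)) + 2k/T
    print(p, f"{aic:.3f}")
# 1 -> -3.369, 2 -> -4.040, 3 -> -4.724, 4 -> -5.011 (4 lags minimizes AIC)
```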
\n\nQ: Suppose that a test statistic has associated with it a p-value of 0.08. Which one of the following statements is true?\n(i) If the size of the test were exactly 8%, we would be indifferent between rejecting and not rejecting the null hypothesis\n(ii) The null would be rejected if a 10% size of test were used\n(iii) The null would not be rejected if a 1% size of test were used\n(iv) The null would be rejected if a 5% size of test were used.\n(A) (ii) and (iv) only (B) (i) and (iii) only (C) (i), (ii), and (iii) only (D) (i), (ii), (iii), and (iv).\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s reason about each of the options.\n(i) is a true statement: if the size of the test were exactly 8%, the p-value would equal the size, so we would be indifferent.\n(ii) is a true statement: 10% is greater than 8%, so the null would be rejected.\n(iii) is a true statement: 1% is less than 8%, so the null would not be rejected.\n(iv) is not a true statement: 5% is less than 8%, so the null would not be rejected. Thus, (i), (ii), and (iii) are true. The answer is (C).\n\nQ: For a stationary autoregressive process, shocks will\n(A) Eventually die away (B) Persist indefinitely (C) Grow exponentially (D) Never occur\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. This is a question about stationary processes. For a stationary autoregressive process, shocks will eventually die away. The answer is (A).", "electrical_engineering": "\nThe following are multiple choice questions (with answers) about electrical engineering.\n\nQ: A point pole has a strength of 4\u03c0 * 10^-4 weber. The force in newtons on a point pole of 4\u03c0 * 1.5 * 10^-4 weber placed at a distance of 10 cm from it will be\n(A) 15 N. (B) 20 N. (C) 7.5 N. (D) 3.75 N.\nA: Let's think step by step. The force between two point poles is given by m_1m_2/(mu_0 4 \\pi r^2), in analogy to Coulomb\u2019s law. Plugging in the values given in the question, we calculate that the force is approximately 15 N. The answer is (A).\n\nQ: The coil of a moving coil meter has 100 turns, is 40 mm long and 30 mm wide. The control torque is 240*10^-6 N-m on full scale. If the magnetic flux density is 1 Wb/m^2, the range of the meter is\n(A) 1 mA. (B) 2 mA. (C) 3 mA. (D) 4 mA.\nA: Let's think step by step. The torque on a coil in a uniform magnetic field is given by BANI, where B is the magnetic flux density, A is the area of the coil, N is the number of turns, and I is the current. So we have that I = (Torque)/(BAN), or 240e-6/(1200e-6 * 100 * 1) = 2e-3. The answer is (B).\n\nQ: In an SR latch built from NOR gates, which condition is not allowed\n(A) S=0, R=0 (B) S=0, R=1 (C) S=1, R=0 (D) S=1, R=1\nA: Let's think step by step. An SR latch is a set-reset latch; in the case where S=1 and R=1, the circuit has no stable state; instead a race condition will be produced within the circuit, so the device will be in an undefined state. So S=1, R=1 is an illegal input. The answer is (D).\n\nQ: Two long parallel conductors carry 100 A. If the conductors are separated by 20 mm, the force per meter of length of each conductor will be\n(A) 100 N. (B) 0.1 N. (C) 1 N. (D) 0.01 N.\nA: Let's think step by step. The magnetic force-per-length between two current-carrying conductors is given by \\mu_0 I_1 I_2 / (2 \\pi r), where $r$ is the separation distance and I_1 and I_2 are the currents. Plugging in 100 A for I_1 and I_2, and 20 mm for r, gives 0.1 N. The answer is (B).
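The moving-coil meter exemplar above uses I = torque / (B * A * N); a quick numeric check (illustrative only, not part of the prompt data):

```python
B = 1.0                      # flux density in Wb/m^2
A = 0.040 * 0.030            # coil area in m^2 (40 mm x 30 mm)
N = 100                      # number of turns
torque = 240e-6              # full-scale control torque in N*m
print(torque / (B * A * N))  # -> 0.002 A, i.e. a 2 mA range
```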
\n\nQ: In a 2 pole lap winding dc machine, the resistance of one conductor is 2\u03a9 and total number of conductors is 100. Find the total resistance\n(A) 200\u03a9 (B) 100\u03a9 (C) 50\u03a9 (D) 10\u03a9\nA: Let's think step by step. In lap winding, effectively two resistors are connected in parallel, so the actual resistance of each pair is 1 Ohm. Since we have 50 pairs, we get a total resistance of 50 Ohms. The answer is (C).", "elementary_mathematics": "The following are multiple choice questions (with answers) about elementary mathematics.\n\nQ: Olivia used the rule \"Add 11\" to create the number pattern shown below. 10, 21, 32, 43, 54. Which statement about the number pattern is true?\n(A) The 10th number in the pattern will be an even number.\n(B) The number pattern will never have two even numbers next to each other.\n(C) The next two numbers in the pattern will be an even number then an odd number.\n(D) If the number pattern started with an odd number then the pattern would have only odd numbers in it.\nA: Let's think step by step. Choice A is incorrect because every even-numbered term in the pattern is odd, so the 10th number will be odd, not even. Choice B is correct, because adding an odd number (in this case 11) to an odd number produces an even number, and adding an odd number to an even number produces an odd number. Thus the terms in the pattern will alternate between odd and even, so there will never be two even numbers next to each other. Choice C is incorrect because the last term in the example is even (54), and we know that the terms will alternate between even and odd. Choice D is incorrect because the terms in the pattern will alternate between odd and even, regardless of the value of the first term. The answer is (B).\n\nQ: The population of the city where Michelle was born is 145,826. What is the value of the 5 in the number 145,826?\n(A) 5 thousands\n(B) 5 hundreds\n(C) 5 tens\n(D) 5 ones\nA: Let's think step by step. Choice A is correct, because there are three digits following the 5, so the 5 is in the thousands place. Thus the other choices are incorrect. The answer is (A).\n\nQ: A store sells 107 different colors of paint. They have 25 cans of each color in storage. The number of cans of paint the store has in storage can be found using the expression below. 107 \u00d7 25. How many cans of paint does the store have in storage?\n(A) 749\n(B) 2,675\n(C) 2,945\n(D) 4,250\nA: Let's think step by step. We can calculate 107 x 25 = (100 x 25) + (7 x 25) = 2500 + 175 = 2675. The answer is (B).\n\nQ: A total of 30 players will play basketball at a park. There will be exactly 5 players on each team. Which statement correctly explains how to find the number of teams needed?\n(A) Add 5 to 30 to find 35 teams.\n(B) Divide 30 by 5 to find 6 teams.\n(C) Multiply 30 and 5 to find 150 teams.\n(D) Subtract 5 from 30 to find 25 teams.\nA: Let's think step by step. We want to find the number of teams. We know that there are 5 players/team, and 30 players. Thus to get the number of teams we divide players by players/team, so 30 players / 5 players/team = 6 teams. The answer is (B).\n\nQ: Which expression is equivalent to 5 x 9?\n(A) (5 x 4) x (6 x 5)\n(B) (5 x 5) + (5 x 4)\n(C) (5 x 5) + (5 x 9)\n(D) (5 x 9) x (6 x 9)\nA: Let's think step by step. We know that 9 = (5 + 4), so 5 x 9 = 5 x (5 + 4) = (5 x 5) + (5 x 4). The answer is (B).", "formal_logic": "The following are multiple choice questions (with answers) about formal logic.\n\nQ: Which of the given formulas of PL is the best symbolization of the following sentence?\nTurtles live long lives and are happy creatures, unless they are injured.\n(A) (L \u2022 H) \u2261 I (B) (L \u2022 H) \u2228 I (C) L \u2022 (H \u2228 I) (D) L \u2022 (H \u2283 R).\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step.
Let \u201cL\u201d denote \u201cliving long\u201d, \u201cH\u201d \u201cbeing happy\u201d, and \u201cI\u201d \u201cbeing injured\u201d. Now, consider each choice:\n(A) means (living long AND being happy) is equivalent to (being injured). \n(B) means (living long AND being happy) OR (being injured). \n(C) means (living long) AND (being happy OR being injured). \n(D) means (living long) AND (being happy implies being R), but what R denotes is not clear.\nObviously, (B) is the best symbolization of the original sentence. The answer is (B).\n\nQ: Select the best translation into predicate logic. George borrows Hector's lawnmower. (g: George; h: Hector; l: Hector's lawnmower; Bxyx: x borrows y from z).\n(A) Blgh (B) Bhlg (C) Bglh (D) Bghl\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. We are told that \u201cBxyx\u201d means \u201cx borrows y from z\u201d. We can rewrite \u201cGeorge borrows Hector's lawnmower\u201d as \u201cGeorge borrows a lawnmower from Hector\u201d, which can then be translated into predicate logic as \u201cBglh\u201d. The answer \u201cBglh\u201d appears in (C); therefore, (C) must be the correct answer. The answer is (C).\n\nQ: \nSelect the best English interpretation of the given arguments in predicate logic.\nDm\n(\u2200x)(Wx \u2283 ~Dx). \n(\u2200x)Wx \u2228 Ag\t/ (\u2203x)Ax\n(A) Marina is a dancer. Some weaklings are not dancers. Either everything is a weakling or Georgia plays volleyball. So something plays volleyball. (B) Marina is a dancer. No weakling is a dancer. Everything is either a weakling or plays volleyball. So something plays volleyball. (C) Marina is a dancer. Some weaklings are not dancers. Everything is either a weakling or plays volleyball. So something plays volleyball. (D) Marina is a dancer. No weakling is a dancer. Either everything is a weakling or Georgia plays volleyball. So something plays volleyball.\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cD\u201d denote \u201cbeing a dancer\u201d, \u201cm\u201d denote \u201cMarina\u201d, \u201cg\u201d denote \u201cGeorgia\u201d, \u201cW\u201d denote \u201cweakling\u201d, \u201cA\u201d denote \u201cplaying volleyball\u201d. Then, we have the following:\n1. Dm \u2192 Marina is a dancer.\n2. (\u2200x)(Wx \u2283 ~Dx). \u2192 For all x, if x is a weakling, then x is not a dancer. In other words, no weakling is a dancer.\n3. (\u2200x)Wx \u2228 Ag\t/ (\u2203x)Ax \u2192 Either everything is a weakling or Georgia plays volleyball. So there exists an x that plays volleyball. \nOptions (A) and (C) do claim that some weaklings are not dancers, but the second argument strongly states that no weakling is a dancer. Thus, we can eliminate them. Option (B) omits the important detail about Georgia playing volleyball. Option (D) has all the details presented in the arguments and is the best English interpretation of the arguments. The answer is (D).\n\nQ: Select the best translation into predicate logic: No people drive on Mars.\n(A) ~Pd (B) (\u2200x)(Px \u2228 ~Dx) (C) (\u2200x)(Px \u2283 ~Dx) (D) ~Dp\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cP\u201d denote \u201cbeing a person\u201d and \u201cD\u201d denote \u201cdriving on Mars\u201d.
Then let\u2019s consider each option:\nOption (A): ~Pd \u2192 d is not a person.\nOption (B): (\u2200x)(Px \u2228 ~Dx) \u2192 For all x, x is a person or x does not drive on Mars.\nOption (C): (\u2200x)(Px \u2283 ~Dx) \u2192 For all x, if x is a person, then x does not drive on Mars.\nOption (D): ~Dp \u2192 p does not drive on Mars.\nOf all these options, Option (C) appears to be the best and most meaningful interpretation of the argument \u201cNo people drive on Mars.\u201d The answer is (C).", "global_facts": "The following are multiple choice questions (with answers) about global facts.\n\nQ: As of 2017, how many of the world\u2019s 1-year-old children today have been vaccinated against some disease?\n(A) 80% (B) 60% (C) 40% (D) 20%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. According to data published by the World Health Organization, the share of 1-year-old children vaccinated against some disease in 2017 exceeds 80%. The answer is (A).\n\nQ: As of 2019, about what percentage of Americans agree that the state is run for the benefit of all the people?\n(A) 31% (B) 46% (C) 61% (D) 76%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. In 2019, about 46% of Americans agreed that the state is run for the benefit of all the people. The answer is (B).\n\nQ: As of 2019, about what percentage of Russians say it is very important to have free media in our country without government/state censorship?\n(A) 38% (B) 53% (C) 68% (D) 83%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2019, about 38% of Russians say it is very important to have free media in our country. The answer is (A).\n\nQ: As of 2015, since 1990 forests have ____ in Europe and have ____ in Africa and the Americas.\n(A) increased, increased (B) increased, decreased (C) decreased, increased (D) decreased, decreased\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2015, since 1990 forests have increased in Europe and have decreased in Africa and the Americas. The answer is (B).\n\nQ: Which of the following pairs of statements are both true (as of 2019)?\n(A) People tend to be optimistic about their own future and the future of their nation or the world. (B) People tend to be optimistic about their own future but pessimistic about the future of their nation or the world. (C) People tend to be pessimistic about their own future but optimistic about the future of their nation or the world. (D) People tend to be pessimistic about their own future and the future of their nation or the world.\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2019, most people tend to be optimistic about their own future but pessimistic about the future of their nation or the world. The answer is (B).", "high_school_biology": "The following are multiple choice questions (with answers) about high school biology.\n\nQ: In animal cells, which of the following represents the most likely pathway that a secretory protein takes as it is synthesized in a cell?\n(A) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013rough ER (B) Ribosome\u2013Golgi apparatus\u2013rough ER\u2013secretory vesicle\u2013plasma membrane (C) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013rough ER (D) Ribosome\u2013rough ER\u2013Golgi apparatus\u2013secretory vesicle\u2013plasma membrane\nA: Let's think step by step.
Protein synthesis starts at the ribosome, so we can eliminate (A) and (C). The protein then moves into the rough endoplasmic reticulum and from there to the Golgi apparatus, where it is modified and packaged into a secretory vesicle. The vesicle then travels to the plasma membrane, where the protein is secreted. The answer is (D).\n\nQ: A mutation in a bacterial enzyme changed a previously polar amino acid into a nonpolar amino acid. This amino acid was located at a site distant from the enzyme\u2019s active site. How might this mutation alter the enzyme\u2019s substrate specificity?\n(A) By changing the enzyme\u2019s pH optimum (B) By changing the enzyme\u2019s location in the cell (C) By changing the shape of the protein (D) An amino acid change away from the active site cannot alter the enzyme\u2019s substrate specificity.\nA: Let's think step by step. A change in an amino acid leads to a change in the primary structure of the protein. A change in the primary structure may lead to a change in the secondary and the tertiary structure of the protein. A change in the tertiary structure means a change in the shape of the protein, so (C) has to be correct. Since the change does not affect the active site of the enzyme, we do not expect the activity of the enzyme to be affected. The answer is (C).\n\nQ: Which of the following is not a way to form recombinant DNA?\n(A) Translation (B) Conjugation (C) Specialized transduction (D) Transformation\nA: Let's think step by step. The introduction of foreign DNA or RNA into bacteria or eukaryotic cells is a common technique in molecular biology and scientific research. There are multiple ways foreign DNA can be introduced into cells including transformation, transduction, conjugation, and transfection. In contrast, (A) is not a way to form recombinant DNA: during translation the ribosomes synthesize proteins from RNA. The answer is (A).\n\nQ: Homologous structures are often cited as evidence for the process of natural selection. All of the following are examples of homologous structures EXCEPT\n(A) the wings of a bird and the wings of a bat (B) the flippers of a whale and the arms of a man (C) the pectoral fins of a porpoise and the flippers of a seal (D) the forelegs of an insect and the forelimbs of a dog\nA: Let's think step by step. Homologous structures are similar physical features in organisms that share a common ancestor but may serve different functions. Comparisons (B) and (C) are clearly homologous because they share a common ancestor and the structures serve different purposes. Bat wings and bird wings are also homologous: while they are both wings, the forelimbs serve different purposes. Insects and dogs are only very distantly related, since one is a vertebrate while the other is an invertebrate, and the forelimbs serve the same purpose, so they are not homologous. The answer is (D).\n\nQ: Which of the following is not known to be involved in the control of cell division?\n(A) Cyclins (B) Protein kinases (C) Checkpoints (D) Fibroblast cells\nA: Let's think step by step. Normal cells move through the cell cycle in a regulated way. At the checkpoint stage, they use information about their own internal state and cues from the environment around them to decide whether to proceed with cell division. Cues like these act by changing the activity of core cell cycle regulators inside the cell. The most common regulators are cyclins and cyclin-dependent kinases. Fibroblast cells do not play any role in cell division.
The answer is (D).", "high_school_chemistry": "The following are multiple choice questions (with answers) about high school chemistry.\n\nQ: Which of the following is considered an acid anhydride?\n(A) HCl (B) H2SO3 (C) SO2 (D) Al(NO3)3\nA: Let's think step by step. An acid anhydride is a compound that is derived by removing water from an acid. The chemical formula for water is H2O, which means that we need to determine which of these options, when combined with H2O, forms an acid. SO2, or sulfur dioxide, when combined with H2O, makes H2SO3, or sulfurous acid. The answer is (C).\n\nQ: Which of the following is expected to be a polar molecule?\n(A) PCl4F (B) BF3 (C) CO2 (D) Si(CH3)4\nA: Let's think step by step. A polar molecule is one that has a slightly positive charge on one end of the molecule and a slightly negative charge on the other end. Boron trifluoride (BF3) has Boron as the center atom and three fluorine atoms attached to it; it is trigonal planar and symmetric, so it is nonpolar. Carbon Dioxide (CO2) has Carbon as the central atom with double bonds to two Oxygen atoms - this is also symmetrical and therefore nonpolar. The same is the case for tetramethyl silane (Si(CH3)4), which is a Silicon atom surrounded by four methyl groups. The structure of PCl4F is that Phosphorus is the central atom, attached to four chlorines and one fluorine atom. This is asymmetrical, and therefore has a net dipole and is expected to be a polar molecule. The answer is (A).\n\nQ: From the solubility rules, which of the following is true?\n(A) All chlorides, bromides, and iodides are soluble (B) All sulfates are soluble (C) All hydroxides are soluble (D) All ammonium-containing compounds are soluble\nA: Let's think step by step. The chlorides, bromides, and iodides of lead, silver, and mercury are not soluble in water. This rules out (A). The sulfates of lead, barium, and calcium are not soluble in water, which rules out (B). The hydroxides of any metal besides sodium, potassium, ammonium, calcium, and barium are insoluble. This rules out (C). Typically ammonium ions indicate a soluble ionic substance. The answer is (D).\n\nQ: A new compound is synthesized and found to be a monoprotic acid with a molar mass of 248 g/mol. When 0.0050 mol of this acid are dissolved in 0.500 L of water, the pH is measured as 3.89. What is the pKa of this acid?\n(A) 3.89 (B) 7.78 (C) 5.78 (D) 2.33\nA: Let's think step by step. Recall that $[A^{-}] = [H^{+}]$. Here, this is equal to $10^{-3.89}$. Then we have $K_{a} = \\frac{[H^{+}][A^{-}]}{[HA]} = \\frac{10^{-3.89} \\cdot 10^{-3.89}}{10^{-2}}$. The resulting exponent is $-3.89 + (-3.89) - (-2) = -5.78$, therefore $K_a = 10^{-5.78}$. The $pK_a$ is the negative log of $K_a$, which is equal to $5.78$. The answer is (C).
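The pKa arithmetic in the exemplar above can be checked directly; a minimal sketch (names illustrative, not part of the prompt data):

```python
import math

H = 10 ** -3.89                    # [H+] = [A-] at pH 3.89
HA = 0.0050 / 0.500                # 0.01 M monoprotic acid
Ka = H * H / HA
print(round(-math.log10(Ka), 2))   # pKa -> 5.78
```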
\n\nQ: A solution contains 2.00 mole of acetic acid, CH3COOH, and 1.00 mole of calcium acetate, Ca(CH3COO)2. The solution is able to resist the addition of a small amount of strong acid or strong base with only minor changes in the pH of the solution. Larger quantities of strong acid or strong base can cause a significant change in pH. How many moles of nitric acid, HNO3, may be added before the pH begins to change significantly?\n(A) 0.500 mole (B) 1.00 mole (C) 2.00 mole (D) 3.00 mole\nA: Let's think step by step. We would like to compute the buffer capacity of this solution. First we write the equation for the ionization of the weak acid, in this case of acetic acid. $CH_{3}COOH (aq) + H_{2}O \\rightarrow H_{3}O^{+} + CH_{3}COO^{-}$. The conjugate base is therefore the acetate ion. The added strong acid, nitric acid, will react with the conjugate base. Therefore the maximum amount of acid that can be added will be equal to the amount of acetate ion, or 2 moles. The answer is (C).", "high_school_computer_science": "The following are multiple choice questions (with answers) about high school computer science.\n\nQ: Which of the following is an example of the use of a device on the Internet of Things (IoT)?\n(A) A car alerts a driver that it is about to hit an object. (B) A hiker uses a GPS watch to keep track of her position. (C) A refrigerator orders milk from an online delivery service when the milk in the refrigerator is almost gone. (D) A runner uses a watch with optical sensors to monitor his heart rate.\nA: Let's think step by step. The term Internet of Things (IoT) refers to common devices which are connected to the internet, enabling new functionality. Choice A is incorrect because it does not describe an internet connected device. In choice B, the watch is only described as having GPS functionality but no internet connectivity. Choice C describes a common device (a refrigerator) which has internet connectivity enabling new functionality (online ordering). Choice D does not mention internet connectivity for the watch, only optical sensors. The answer is (C).\n\nQ: Many Web browsers allow users to open anonymous windows. During a browsing session in an anonymous window, the browser does not record a browsing history or a list of downloaded files. When the anonymous window is exited, cookies created during the session are deleted. Which of the following statements about browsing sessions in an anonymous window is true?\n(A) The activities of a user browsing in an anonymous window will not be visible to people who monitor the user's network, such as the system administrator. (B) Items placed in a Web store's shopping cart for future purchase during the anonymous browsing session will not be saved on the user's computer. (C) A user will not be able to log in to e-mail or social media accounts during the anonymous browsing session. (D) A user browsing in an anonymous window will be protected from viruses launched from any web sites visited or files downloaded.\nA: Let's think step by step. Choice A is incorrect as it only describes network traffic, which an anonymous browser does not change. Choice B is correct as it correctly describes how an anonymous browser will prevent saving data on the user\u2019s computer after the session is ended. Choice C is incorrect because an anonymous browser will not prevent logging in to email or social media accounts. Choice D is incorrect because an anonymous browser in itself performs no virus protection. The answer is (B).\n\nQ: In the program below, the initial value of X is 5 and the initial value of Y is 10.\nIF (X < 0){\n DISPLAY (\"Foxtrot\")\n} ELSE {\n IF (X > Y){\n DISPLAY (\"Hotel\")\n } ELSE {\n IF (Y > 0){\n DISPLAY (\"November\")\n } ELSE {\n DISPLAY (\"Yankee\")\n }\n }\n}\nWhat is displayed as a result of running the program?\n(A) Foxtrot (B) Hotel (C) November (D) Yankee\nA: Let's think step by step. Because X has the value 5, the first conditional IF (X < 0) is false, so we move to the first ELSE clause. Because X is 5 and Y is 10, the second conditional IF (X > Y) is false, so we move to the following ELSE clause. Since Y is 10, the conditional IF (Y > 0) is true, so the command DISPLAY (\"November\") is executed.
The answer is (C).\n\nQ: What is the output of \"abc\"[::-1] in Python 3?\n(A) Error (B) abc (C) cba (D) c\nA: Let's think step by step. We know that the slicing operator [::-1] takes all of the elements in the string in reverse order, so we reverse the order of the string \"abc\", resulting in \"cba\". The answer is (C).\n\nQ: A list of numbers has n elements, indexed from 1 to n. The following algorithm is intended to display the number of elements in the list that have a value greater than 100. The algorithm uses the variables count and position. Steps 3 and 4 are missing.\n Step 1: Set count to 0 and position to 1.\n Step 2: If the value of the element at index position is greater than 100, increase the value of count by 1.\n Step 3: (missing step)\n Step 4: (missing step)\n Step 5: Display the value of count.\nWhich of the following could be used to replace steps 3 and 4 so that the algorithm works as intended?\n(A) Step 3: Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the value of count is greater than 100.\n(B) Step 3: Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the value of position is greater than n.\n(C) Step 3: Repeat step 2 until the value of count is greater than 100.\n Step 4: Increase the value of position by 1.\n(D) Step 3: Repeat step 2 until the value of position is greater than n.\n Step 4: Increase the value of count by 1.\nA: Let's think step by step. Choice A is incorrect, because its Step 4 has an incorrect termination condition, stopping when count is greater than 100. We need to stop after inspecting all elements in the list. Choice B is correct because it correctly increments both count and position, and correctly repeats these steps and terminates when all elements in the list have been inspected. Choice C is incorrect because it incorrectly increments the variable count until its value is greater than 100, regardless of the elements in the list. Choice D is incorrect because its step 3 does not increment the value of position, so it will repeat forever. 
The answer is (B).", "high_school_european_history": "The following are multiple choice questions (with answers) about high school european history.\n\nQ: This question refers to the following information.\nAlbeit the king's Majesty justly and rightfully is and ought to be the supreme head of the Church of England, and so is recognized by the clergy of this realm in their convocations, yet nevertheless, for corroboration and confirmation thereof, and for increase of virtue in Christ's religion within this realm of England, and to repress and extirpate all errors, heresies, and other enormities and abuses heretofore used in the same, be it enacted, by authority of this present Parliament, that the king, our sovereign lord, his heirs and successors, kings of this realm, shall be taken, accepted, and reputed the only supreme head in earth of the Church of England, called Anglicans Ecclesia; and shall have and enjoy, annexed and united to the imperial crown of this realm, as well the title and style thereof, as all honors, dignities, preeminences, jurisdictions, privileges, authorities, immunities, profits, and commodities to the said dignity of the supreme head of the same Church belonging and appertaining; and that our said sovereign lord, his heirs and successors, kings of this realm, shall have full power and authority from time to time to visit, repress, redress, record, order, correct, restrain, and amend all such errors, heresies, abuses, offenses, contempts, and enormities, whatsoever they be, which by any manner of spiritual authority or jurisdiction ought or may lawfully be reformed, repressed, ordered, redressed, corrected, restrained, or amended, most to the pleasure of Almighty God, the increase of virtue in Christ's religion, and for the conservation of the peace, unity, and tranquility of this realm; any usage, foreign land, foreign authority, prescription, or any other thing or things to the contrary hereof notwithstanding.\nEnglish Parliament, Act of Supremacy, 1534\nFrom the passage, one may infer that the English Parliament wished to argue that the Act of Supremacy would\n(A) give the English king a new position of authority (B) give the position of head of the Church of England to Henry VIII alone and exclude his heirs (C) establish Calvinism as the one true theology in England (D) end various forms of corruption plaguing the Church in England\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The Act of Supremacy states that it grants authority to the king \"to repress and extirpate all errors, heresies, and other enormities and abuses\", referring to the corruption in the Church of England. The answer is (D).\n\nQ: This question refers to the following information.\nRead the following excerpt.\nThe revolutionary seed had penetrated into every country and spread more or less. It was greatly developed under the r\u00e9gime of the military despotism of Bonaparte. His conquests displaced a number of laws, institutions, and customs; broke through bonds sacred among all nations, strong enough to resist time itself; which is more than can be said of certain benefits conferred by these innovators.\nThe monarchs will fulfil the duties imposed upon them by Him who, by entrusting them with power, has charged them to watch over the maintenance of justice, and the rights of all, to avoid the paths of error, and tread firmly in the way of truth. 
Placed beyond the passions which agitate society, it is in days of trial chiefly that they are called upon to despoil realities of their false appearances, and to show themselves as they are, fathers invested with the authority belonging by right to the heads of families, to prove that, in days of mourning, they know how to be just, wise, and therefore strong, and that they will not abandon the people whom they ought to govern to be the sport of factions, to error and its consequences, which must involve the loss of society.\nUnion between the monarchs is the basis of the policy which must now be followed to save society from total ruin. . . .\nLet them not confound concessions made to parties with the good they ought to do for their people, in modifying, according to their recognized needs, such branches of the administration as require it.\nLet them be just, but strong; beneficent, but strict.\nLet them maintain religious principles in all their purity, and not allow the faith to be attacked and morality interpreted according to the social contract or the visions of foolish sectarians.\nLet them suppress Secret Societies; that gangrene of society.\n\u2014Klemens von Metternich, Political Confession of Faith, 1820\nWhich of the following was the greatest cause of the fears expressed by Metternich in the document above?\n(A) The ideas of personal liberty and nationalism conceived during the Enlightenment resulted in radical revolutions that could spread throughout Europe. (B) The conquest of Europe by Napoleon led to the creation of new factions and shifted the European balance of power. (C) The power of monarchs had grown to the point where it needed to be checked by other powers within each nation or domination of civilians would occur. (D) The rising and falling economic cycle of the newly emerging capitalist economy could lead to civilian unrest that must be suppressed.\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The fears of revolution in early 19th century Europe expressed by Klemens von Metternich, a conservative Austrian statesman, were a direct result of the age of Enlightenment, a period of European history where the absolute power of the monarchy was challenged with ideas of individual liberty and nationalism, leading to the French revolution and its effects all over Europe. 
The answer is (A).\n\nQ: This question refers to the following information.\nThe excerpts below are from the Navigation Acts of 1651.\n[A]fter the first day of December, one thousand six hundred fifty and one, and from thence forwards, no goods or commodities whatsoever of the growth, production or manufacture of Asia, Africa or America, or of any part thereof; or of any islands belonging to them, or which are described or laid down in the usual maps or cards of those places, as well of the English plantations as others, shall be imported or brought into this Commonwealth of England, or into Ireland, or any other lands, islands, plantations, or territories to this Commonwealth belonging, or in their possession, in any other ship or ships, vessel or vessels whatsoever, but only in such as do truly and without fraud belong only to the people of this Commonwealth, or the plantations thereof, as the proprietors or right owners thereof; and whereof the master and mariners are also of the people of this Commonwealth, under the penalty of the forfeiture and loss of all the goods that shall be imported contrary to this act, , , ,\n[N]o goods or commodities of the growth, production, or manufacture of Europe, or of any part thereof, shall after the first day of December, one thousand six hundred fifty and one, be imported or brought into this Commonwealth of England, or any other lands or territories to this Commonwealth belonging, or in their possession, in any ship or ships, vessel or vessels whatsoever, but in such as do truly and without fraud belong only to the people of this Commonwealth, and in no other, except only such foreign ships and vessels as do truly and properly belong to the people of that country or place, of which the said goods are the growth, production or manufacture.\nWhich of the following best describes the outcome of the Navigation Acts of 1651?\n(A) They served as a catalyst for the growth of English shipping and overseas trade, but did little to limit the prospects of the Dutch in the seventeenth century. (B) They brought about almost immediate hardships for the Dutch economy as their dominance of overseas trade quickly ended. (C) They were rescinded during the restoration of the Stuarts as they sought normal diplomatic relations with the Dutch so not as to need Parliament's financial support for war. (D) They led to nearly a century of recurrent war between England and the Netherlands, which would not end until after American independence.\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The Navigation Acts of 1651 helped English shipping by restricting the ability of ships from other European countries, especially the Dutch, to transport goods from colonies in Asia and Africa into England. The answer is (A).\n\nQ: This question refers to the following information.\nIn Russia there was nothing going on well, and [Souvarine] was in despair over the news he had received. His old companions were all turning to the politicians; the famous Nihilists who made Europe tremble-sons of village priests, of the lower middle class, of tradesmen-could not rise above the idea of national liberation, and seemed to believe that the world would be delivered-when they had killed their despot&\u2026\n\"Foolery! They'll never get out of it with their foolery.\"\nThen, lowering his voice still more, in a few bitter words he described his old dream of fraternity. 
He had renounced his rank and his fortune; he had gone among workmen, only in the hope of seeing at last the foundation of a new society of labour in common. All the sous in his pockets had long gone to the urchins of the settlement; he had been as tender as a brother with the colliers, smiling at their suspicion, winning them over by his quiet workmanlike ways and his dislike of chattering. But decidedly the fusion had not taken place.\nHis voice changed, his eyes grew bright, he fixed them on \u00e9tienne, directly addressing him:\n\"Now, do you understand that? These hatworkers at Marseilles who have won the great lottery prize of a hundred thousand francs have gone off at once and invested it, declaring that they are going to live without doing anything! Yes, that is your idea, all of you French workmen; you want to unearth a treasure in order to devour it alone afterwards in some lazy, selfish corner. You may cry out as much as you like against the rich, you haven't got courage enough to give back to the poor the money that luck brings you. You will never be worthy of happiness as long as you own anything, and your hatred of the bourgeois proceeds solely from an angry desire to be bourgeois yourselves in their place.\"\n\u00e9mile Zola, French writer, Germinal, 1885\nThe passage displays the direct concern for the welfare of the working classes that was typically a part of which movement?\n(A) Capitalist (B) Scientific (C) Communist (D) Existentialist\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The modern Communist movement aims to establish a classless society based on communal ownership and distribution of property and means of production, thereby especially benefiting the working classes. The answer is (C).\n\nQ: This question refers to the following information.\nThe following excerpt is from a pamphlet.\nYou will do me the justice to remember, that I have always strenuously supported the Right of every man to his own opinion, however different that opinion might be to mine. He who denies to another this right, makes a slave of himself to his present opinion, because he precludes himself the right of changing it.\nThe most formidable weapon against errors of every kind is Reason. I have never used any other, and I trust I never shall.\nThe circumstance that has now taken place in France of the total abolition of the whole national order of priesthood, and of everything appertaining to compulsive systems of religion, and compulsive articles of faith, has not only precipitated my intention, but rendered a work of this kind exceedingly necessary, lest in the general wreck of superstition, of false systems of government, and false theology, we lose sight of morality, of humanity, and of the theology that is true.\nI believe in one God, and no more; and I hope for happiness beyond this life.\nI believe in the equality of man; and I believe that religious duties consist in doing justice, loving mercy, and endeavoring to make our fellow-creatures happy.\nI do not believe in the creed professed by the Jewish church, by the Roman church, by the Greek church, by the Turkish church, by the Protestant church, nor by any church that I know of. 
My own mind is my own church.\nAll national institutions of churches, whether Jewish, Christian or Turkish, appear to me no other than human inventions, set up to terrify and enslave mankind, and monopolize power and profit.\nI do not mean by this declaration to condemn those who believe otherwise; they have the same right to their belief as I have to mine.\n\u2014Thomas Paine, The Age of Reason, 1794\u20131795\nWhich of the following Enlightenment philosophes designed a system of checks and balances for government to avoid abuses of power?\n(A) Jean Jacques Rousseau (B) Baron Montesquieu (C) Mary Wollstonecraft (D) Adam Smith\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. Baron Montesquieu was an 18th century French philosopher who wrote extensively against the monopolization of power and advocated for a system of checks and balances in government to prevent the rise of despotism. The answer is (B).", "high_school_geography": "The following are multiple choice questions (with answers) about high school geography.\n\nQ: Which one of the following items is an example of nonmaterial culture?\n(A) Dove soap (B) Dove candy bar (C) Dove symbol (D) A dove (bird).\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. Nonmaterial culture consists of cultural ideas, beliefs or symbols that are not physical objects. The answer is (C).\n\nQ: During the third stage of the demographic transition model, which of the following is true?\n(A) Birth rates increase and population growth rate is less rapid. (B) Birth rates decline and population growth rate is less rapid. (C) Birth rates increase and population growth rate increases. (D) Birth rates decrease and population growth rate increases.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. The demographic transition model models the five different stages of population growth as a country goes through economic development, where the third stage refers to a period of declining birth rates and lower population growth. The answer is (B).\n\nQ: The practice of hiring a foreign third-party service provider to run an operation is called\n(A) outsourcing. (B) offshoring. (C) maquiladoras. (D) locational interdependence.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. \"Offshoring\" literally means to move or base some of the activities or processes of a company to a foreign country. The answer is (B).\n\nQ: Which of the following statements is NOT accurate regarding the services provided by local governments in the United States?\n(A) Duplication of efforts occurs often. (B) Social problems of the central city spill over into the surrounding residential suburbs. (C) Inefficiency in providing services occurs often. (D) One neighborhood's efforts to reduce pollution are always supported by neighboring communities.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. There may be economic, social or political reasons for two neighboring communities and their local governments not agreeing to pollution reduction efforts initiated by one of them. The answer is (D).\n\nQ: The rate of natural increase of a population is found by subtracting the\n(A) crude death rate from the crude birth date. (B) crude birth rate from the crude death rate. (C) doubling time from the crude birth rate. (D) fertility rate from the crude death rate.\nA: Let's think step by step. 
We refer to Wikipedia articles on geography for help. The difference between the number of births and deaths gives the population increase at any given time. The answer is (A).", "high_school_government_and_politics": "The following are multiple choice questions (with answers) about high school government and politics.\n\nQ: Which of the following best states an argument made by James Madison in The Federalist number 10?\n(A) Honest politicians can prevent factions from developing. (B) Factions are more likely to occur in large republics than in small ones. (C) The negative effects of factionalism can be reduced by a republican government. (D) Free elections are the people's best defense against factionalism.\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. In the Federalist number 10, James Madison advocated for a representative republican form of government to guard against factionalism. The answer is (C).\n\nQ: The term \"budget deficit\" refers to the\n(A) annual increase in federal spending on the military (B) amount of interest on the national debt (C) difference between the initial budget proposals made by the president and Congress (D) amount the government spends in excess of its revenues\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. When the government spends more than it earns, the difference is the budget deficit. The answer is (D).\n\nQ: Which of the following statements about cabinet departments is FALSE?\n(A) They are established by the legislative branch. (B) Their members often don't have much influence over presidential decisions. (C) They cannot all be run by leaders who belong to the same political party the president does. (D) Not every federal agency is a cabinet department.\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. There is no law stipulating that some cabinet department leaders have to belong to a political party different from that of the president. The answer is (C).\n\nQ: Which of the following cases established the precedent that a defendant must be informed of the right to remain silent, the right to a lawyer, and protection from self-incrimination?\n(A) Weeks v. United States (B) Betts v. Brady (C) Mapp v. Ohio (D) Miranda v. Arizona\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. In the landmark Miranda v. Arizona in 1966, the US Supreme Court, based on the Fifth and Sixth Amendments of the US Constitution, guaranteed a defendant's right to an attorney and protection from self-incrimination. The answer is (D).\n\nQ: Uncertainty over the limits to presidential power is caused primarily by the fact that\n(A) the constitutional definition of those powers is broad and unspecific (B) most people agree that the Constitution places too many limits on presidential power (C) the Supreme Court consistently refuses to rule on cases concerning presidential powers (D) constitutional amendments have greatly increased presidential powers\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. The US Constitution is not very specific about the powers of the president, leading to uncertainty over its limits. 
The answer is (A).", "high_school_macroeconomics": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\nQ: Which of the following policies best describes supply-side fiscal policy?\n(A) An increase in the money supply (B) Increased government spending (C) Lower taxes on research and development of new technology (D) Higher taxes on household income\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. Supply-side fiscal policy stimulates the economy by encouraging more production of goods and services through reduction in taxes and deregulation. The answer is (C).\n\nQ: The short-run Phillips curve indicates a\n(A) direct relation between unemployment and inflation (B) direct relation between price and quantity demanded (C) inverse relation between price and quantity demanded (D) inverse relation between unemployment and inflation\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. The short-run Phillips curve shows that whenever unemployment decreases below a natural level, the inflation starts increasing, and vice-versa. The answer is (D).\n\nQ: Holding all else equal which of the following monetary policies would be used to boost U.S. exports?\n(A) Increasing the discount rate (B) Increasing the reserve ratio (C) Buying government securities (D) Lowering tariffs\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. Buying government securities leads to reduction in demand for US dollars from foreign buyers, thereby making it cheaper and hence making US exports more attractive. The answer is (C).\n\nQ: A federal deficit occurs when\n(A) exports exceed imports. (B) imports exceed exports. (C) federal tax collections exceed spending. (D) federal spending exceeds federal tax revenues.\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. A federal deficit occurs when federal spending exceeds federal income which is primarily from tax revenues. The answer is (D).\n\nQ: Which of the following is not included in the U.S. GDP?\n(A) The U.S. military opens a new base in a foreign country with 1000 U.S. personnel. (B) Japanese consumers buy thousands of CDs produced in the United States. (C) An American pop singer performs a sold-out concert in Paris. (D) A French theatrical production tours dozens of American cities.\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. The economic transactions related to the performance of the American pop-singer in Paris happens entirely outside the U.S. and hence is not included in the GDP numbers. The answer is (C).", "high_school_mathematics": "The following are multiple choice questions (with answers) about high school mathematics.\n\nQ: Simplify and write the result with a rational denominator: $$\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}$$\n(A) \\frac{3\\sqrt{3}}{3} (B) \\frac{1}{3} (C) \\sqrt{3} (D) \\frac{\\sqrt{3}}{3}\nA: Let's think step by step. Factoring $729=3^6$ and combining the roots $\\frac{1}{2}\\frac{1}{3}\\frac{1}{2}=\\frac{1}{12}$, we get that $\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}=\\left(\\frac{1}{3^6}\\right)^{\\frac{1}{12}}=\\frac{1}{3^{\\frac{1}{2}}}=\\frac{3}{\\sqrt{3}}$ The answer is (D).\n\nQ: Five thousand dollars compounded annually at an $x\\%$ interest rate takes six years to double. 
At the same interest rate, how many years will it take $\\$300$ to grow to $\\$9600$?\n(A) 12 (B) 1 (C) 30 (D) 5\nA: Let's think step by step. To go from $\\$300$ to $\\$9600$, the value must go up by a factor of $9600/300=32=2^5$. Since at this interest rate it takes six years for it to double, it will take $5*6=30$ years to grow to $\\$9600$. The answer is (C).\n\nQ: Ten students take a biology test and receive the following scores: 45, 55, 50, 70, 65, 80, 40, 90, 70, 85. What is the mean of the students\u2019 test scores?\n(A) 55 (B) 60 (C) 62 (D) 65\nA: Let's think step by step. There are 10 students and the sum of their scores is $45 + 55 + 50 + 70 + 65 + 80 + 40 + 90 + 70 + 85 = 650$, so the mean is $650/10=65$. The answer is (D).\n\nQ: The variable $x$ varies directly as the square of $y$, and $y$ varies directly as the cube of $z$. If $x$ equals $-16$ when $z$ equals 2, what is the value of $x$ when $z$ equals $\\frac{1}{2}$?\n(A) -1 (B) 16 (C) -\\frac{1}{256} (D) \\frac{1}{16}\nA: Let's think step by step. We know that $x \\propto y^2$ and $y \\propto z^3$, so $x = k z^6$ for some constant $k$. Plugging in for $x=-16$ and $z=2$, the constant value is $k=\\frac{x}{z^6}=\\frac{-16}{64}=-\\frac{1}{4}$. So, when $z=\\frac{1}{2}$, the value of $x$ is $x=kz^6=-\\frac{1}{4}\\cdot\\frac{1}{2^6}=-\\frac{1}{256}$. The answer is (C).\n\nQ: Joe was in charge of lights for a dance. The red light blinks every two seconds, the yellow light every three seconds, and the blue light every five seconds. If we include the very beginning and very end of the dance, how many times during a seven minute dance will all the lights come on at the same time? (Assume that all three lights blink simultaneously at the very beginning of the dance.)\n(A) 3 (B) 15 (C) 6 (D) 5\nA: Let's think step by step. The least common multiple of 2, 3 and 5 is 30, so the lights come on together every 30 seconds, i.e. twice per minute. Counting the simultaneous blink at the very beginning of the dance, during a 7 minute dance all three lights will come on at the same time $2*7+1=15$ times. The answer is (B).", "high_school_microeconomics": "The following are multiple choice questions (with answers) about high school microeconomics.\n\nQ: Which of the following is necessarily a characteristic of oligopoly?\n(A) Free entry into and exit from the market (B) A few large producers (C) One producer of a good with no close substitutes (D) A homogenous product\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. An oligopoly is a market that is dominated by just a few large sellers or producers. An oligopoly typically has high barriers to new entry, and its product may or may not be differentiated. The answer is (B).\n\nQ: If the government subsidizes producers in a perfectly competitive market, then\n(A) the demand for the product will increase (B) the demand for the product will decrease (C) the consumer surplus will increase (D) the consumer surplus will decrease\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. (A) and (B) are wrong because the demand curve does not change at all. If the government subsidizes producers, the supply will increase, and thus the consumer surplus also increases. The answer is (C).\n\nQ: Which of the following is true of a price floor?\n(A) The price floor shifts the demand curve to the left. (B) An effective floor creates a shortage of the good. (C) The price floor shifts the supply curve of the good to the right. (D) To be an effective floor, it must be set above the equilibrium price.\nA: Let's think step by step. 
We refer to Wikipedia articles on microeconomics for help. A price floor does not shift the demand or supply curve. An effective price floor must be set above the equilibrium price; otherwise the market already clears at a price above the floor and the floor has no effect. The answer is (D).\n\nQ: The concentration ratio for a monopoly is\n(A) 0 (B) 5 (C) 10 (D) 100\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. The concentration ratio is calculated as the sum of the market shares of a specific number of the largest companies. Monopoly means one company or entity controls the entire market; therefore, the concentration ratio is 100 percent. The answer is (D).\n\nQ: In a competitive labor market for housepainters, which of the following would increase the demand for housepainters?\n(A) An effective minimum wage imposed on this labor market. (B) An increase in the price of gallons of paint. (C) An increase in the construction of new houses. (D) An increase in the price of mechanical painters so long as the output effect exceeds the substitution effect.\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. An increase in the construction of new houses means an increased demand for house painting, which in turn increases the demand for housepainters. The answer is (C).", "high_school_physics": "The following are multiple choice questions (with answers) about high school physics.\n\nQ: A microwave oven is connected to an outlet, 120 V, and draws a current of 2 amps. At what rate is energy being used by the microwave oven?\n(A) 10 W (B) 30 W (C) 60 W (D) 240 W\nA: Let's think step by step. Rate of energy usage is known as power; in a dissipative electrical circuit, power is given by voltage times current. So in our case, the power is 120 V times 2 amps, or 240 W. The answer is (D).\n\nQ: A point charge, Q = +1 mC, is fixed at the origin. How much work is required to move a charge, Q = +8 \u00b5C, from the point (0, 4 meters) to the point (3 meters, 0)?\n(A) 3.5 J (B) 6.0 J (C) 22.5 J (D) 40 J\nA: Let's think step by step. To calculate the work required to move a charge from one location to another in a fixed electric field, it is enough to calculate the potential difference between the two locations. Here, the potential only depends on the distance between the charges; it\u2019s $k q_1 q_2 / r$, where $k$ is Coulomb\u2019s constant. Plugging in the values $q_1 = 1$ mC, $q_2 = 8 \\mu$C, $r_1 = 4$ m, and $r_2 = 3$ m, the work is $k q_1 q_2 (1/r_2 - 1/r_1) = 5.992$ J, which rounds to 6 J. The answer is (B).\n\nQ: Which of the following conditions will ensure that angular momentum is conserved? I. Conservation of linear momentum II. Zero net external force III. Zero net external torque\n(A) I and II only (B) I and III only (C) II and III only (D) III only\nA: Let's think step by step. Torque is the rate of change of angular momentum; if there is zero net external torque, angular momentum is conserved. The answer is (D).\n\nQ: A photocell of work function \u03d5 = 2eV is connected to a resistor in series. Light of frequency f = 1 \u00d7 10^15 Hz hits a metal plate of the photocell. If the power of the light is P = 100 W, what is the current through the resistor?\n(A) 2:00 AM (B) 6:00 AM (C) 12:00 AM (D) 24 A\nA: Let's think step by step. The only answer above which has units of current is D, 24 A. The answer is (D).\n\nQ: A pipe full of air is closed at one end. A standing wave is produced in the pipe, causing the pipe to sound a note. 
Which of the following is a correct statement about the wave\u2019s properties at the closed end of the pipe?\n(A) The pressure is at a node, but the particle displacement is at an antinode. (B) The pressure is at an antinode, but the particle displacement is at a node. (C) The pressure and the particle displacement are both at nodes. (D) The pressure and the particle displacement are both at antinodes.\nA: Let's think step by step. At the closed end of the pipe, the particles cannot have any net displacement because the pipe closure stops them. So the particle displacement is at a node. This closure also causes the pressure to be maximal, i.e. an antinode. The answer is (B).", "high_school_psychology": "The following are multiple choice questions (with answers) about high school psychology.\n\nQ: Pascale is interested in the processing strategies children use to learn new information. Pascale would best be classified as what type of psychologist?\n(A) sociocultural (B) clinical (C) cognitive (D) behaviorist\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. A sociocultural psychologist focuses on the effect of societal factors on people. A clinical psychologist focuses on people with mental issues. A cognitive psychologist focuses on how people think and learn, including the processing strategies. A behaviorist focuses more on the effects of environment and experience on people. The answer is (C).\n\nQ: According to Caplan's model of consultee-centered case consultation, the consultant is primarily interested in\n(A) identifying the causes and solutions of the client's presenting problems (B) identifying and eliminating the causes of the consultee's difficulties in handling a problem (C) establishing a hierarchy of authority to enable effective decision making (D) presenting a single, well-defined and unambiguous course of action for the consultant to overcome skills deficits\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Caplan defines two types of consultation. Client-centered case consultation aims to handle the client's problems, while consultee-centered case consultation aims to identify the reasons for the consultee's difficulty in handling a problem. The answer is (B).\n\nQ: According to the Individuals with Disabilities Education Improvement Act, which of the following must an educational agency do before it changes the educational placement of a student with a disability?\n(A) Give the child a trial period in the new environment (B) Notify the parents in writing (C) Obtain school board approval (D) Obtain parental consent\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. When the decision to change the educational placement of a student with a disability is made, the educational agency must notify the parents in writing on that date. The answer is (B).\n\nQ: While swimming in the ocean, Ivan is frightened by a dark shadow in the water even before he has the chance to identify what the shadow is. The synaptic connections taking place during this incident of fright are best described by which of the following?\n(A) Messages are sent from the thalamus directly to the amygdala. (B) Messages are sent from the thalamus to the \"what\" and \"where\" pathways. (C) Messages are sent from the parasympathetic nervous system to the cerebral cortex. (D) Messages are sent from the frontal lobes to the pituitary gland.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. 
Our neural system has a mechanism that can respond to an immediate emotional signal before it reaches the thought center. In Ivan's case, messages travel directly from the thalamus to the amygdala. The answer is (A).\n\nQ: Ani believes that her attitudes and behavior play a central role in what happens to her. Such a belief is likely to be associated with\n(A) a strong superego. (B) low self-esteem. (C) low self-efficacy. (D) an internal locus of control.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. People with an external locus of control believe fate and luck play an important role in their lives, while people with an internal locus of control believe they control their lives. The answer is (D).", "high_school_statistics": "The following are multiple choice questions (with answers) about high school statistics.\n\nQ: A new smartwatch is manufactured in one part of a factory, then secured for shipping in another, independent part of the factory. The weight of the smartwatch has a mean of 62 grams and a standard deviation of 1.0 grams. The weight of the packaging (box, user's guide, bubble wrap, etc.) has a mean of 456 grams and a standard deviation of 6 grams. Together, the distribution of the weight of the smartwatch and its packaging would have the following mean and standard deviation:\n(A) Mean 518 grams; standard deviation 7.0 grams (B) Mean 518 grams; standard deviation 3.5 grams (C) Mean 518 grams; standard deviation 6.1 grams (D) Mean 394 grams; standard deviation 6.1 grams\nA: Let's think step by step. Since the weight of the watch and the weight of the packaging are independent random variables, the mean and variance of their sum are equal to the sum of their individual means and variances. So the mean is 62 + 456 = 518 grams, and the variance is 1.0^2 + 6.0^2 = 37, leading to a standard deviation of about 6.1 grams (the square root of 37). The answer is (C).\n\nQ: After a frost warning was issued, the owner of a large orange grove asked his workers to spray all his trees with water. The water was supposed to freeze and form a protective covering of ice around the orange blossom. Nevertheless, the owner suspected that some trees suffered considerable damage due to the frost. To estimate the proportion of trees that suffered more than 50 percent damage due to the frost, he took a random sample of 100 trees from his grove. What is the response variable in this experiment?\n(A) The proportion of trees that suffered more than 50 percent damage due to frost. (B) The number of trees affected by the frost. (C) The number of trees sampled from the grove. (D) For each sampled tree, whether it suffered more than 50 percent damage or at most 50 percent damage.\nA: Let's think step by step. In this experiment, the response variable is what is measured. For each tree, what is measured is whether or not it suffered more than 50 percent damage due to the frost. The answer is (D).\n\nQ: Suppose X and Y are random variables with E(X) = 37, var(X) = 5, E(Y) = 62, and var(Y) = 12. What are the expected value and variance of the random variable X + Y?\n(A) E(X + Y) = 99, var(X + Y) = 8.5 (B) E(X + Y) = 99, var(X + Y) = 13 (C) E(X + Y) = 99, var(X + Y) = 17 (D) There is insufficient information to answer this question.\nA: Let's think step by step. 
While the means of sums of random variables add regardless of whether the variables are independent, in order to determine the variance of a sum of random variables we need to know not just their individual variances but also the covariance of the two variables, which is not given in this problem. The answer is (D).\n\nQ: Which of the following sets has the smallest standard deviation? Which has the largest?\nI: {1,2,3}\nII: {-10,10}\nIII: {100}\n(A) I, II (B) II, III (C) III, I (D) III, II\nA: Let's think step by step. The variance of distribution I is the expected squared deviation from its mean (which is 2), so the variance is 2/3. The variance of distribution II is 10^2 (because both elements are 10 away from the mean of zero). The variance of distribution III is 0, since it has a single entry. So distribution III has the smallest standard deviation and distribution II has the largest. The answer is (D).\n\nQ: Which of the following is a correct statement about correlation?\n(A) If the slope of the regression line is exactly 1, then the correlation is exactly 1. (B) If the correlation is 0, then the slope of the regression line is undefined. (C) Switching which variable is called x and which is called y changes the sign of the correlation. (D) The correlation r is equal to the slope of the regression line when z-scores for the y-variable are plotted against z-scores for the x-variable.\nA: Let's think step by step. Statement A is false because the slope of the regression line being exactly 1 can occur even when the two variables are not perfectly correlated. Statement B is false because when the correlation is 0, the slope of the regression line is 0, not undefined. Statement C is false because correlation is symmetric in the two random variables. The answer is (D).", "high_school_us_history": "The following are multiple choice questions (with answers) about high school us history.\n\nQ: This question refers to the following information.\nI come not to urge personal claims, nor to seek individual benefits; I appear as the advocate of those who cannot plead their own cause; I come as the friend of those who are deserted, oppressed, and desolate. In the Providence of God, I am the voice of the maniac whose piercing cries from the dreary dungeons of your jails penetrate not your Halls of Legislation. I am the Hope of the poor crazed beings who pine in the cells, and stalls, and cages, and waste rooms of your poor-houses. 
I am the Revelation of hundreds of wailing, suffering creatures, hidden in your private dwellings, and in pens and cabins\u2014shut out, cut off from all healing influences, from all mind-restoring cares.\u2026 Could their melancholy histories be spread before you as revealed to my grieved spirit during the last three months, how promptly, how earnestly would you search out the most approved means of relief; how trifling, how insignificant, by comparison, would appear the sacrifices you are asked to make; how would a few dimes and dollars, gathered from each citizen, diminish in value as a possession, compared with the certain benefits and vast good to be secured for the suffering insane...by the consecration and application of a sufficient fund to the construction of a suitable hospital.\u2026\n\u2014Dorothea Dix, Memorial Soliciting a State Hospital for the Protection and Cure of the Insane,\nSubmitted to the General Assembly of North Carolina, November 1848\nDorothea Dix can best be compared to whom?\n(A) Abigail Adams (B) Clara Barton (C) Shirley Temple (D) Hillary Clinton\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. Both Dorothea Dix and Clara Barton were American nurses and social reformers. The answer is (B).\n\nQ: This question refers to the following information.\n\"As our late Conduct at the Conestoga Manor and Lancaster have occasioned much Speculation & a great diversity of Sentiments in this and neighboring Governments; some vindicating & others condemning it; some charitably alleviating the Crime, & others maliciously painting it in the most odious & detestable Colours, we think it our duty to lay before the Publick, the whole Matter as it appeared, & still appears, to us. . . .\n\"If these things are not sufficient to prove an unjustifiable Attachment in the Quakers to the Indians Savages, a fixed Resolution to befriend them & an utter insensibility to human Distresses, let us consider a few more recent Facts. When we found the last Summer that we were likely to get no Assistance from the Government, some Volunteers went out at our own Expense, determined to drive our Enemies from our Borders; & when we came near to the great Island, we understood that a Number of their Warriors had gone out against our Frontiers. Upon this we returned and came up with them and fought with them at the Munfey Hill where we lost some of our Men & killed some of their Warriors & thereby saved our Frontiers from this Story in another Expedition. But no sooner had we destroyed their Provisions on the great Island, & ruined their trade with the good People at Bethlehem, but these very Indians, who were justly suspected of having murdered our Friends in Northampton County, were by the Influence of some Quakers taken under the Protection of the Government to screen them from the Resentments of the Friends and Relations of the Murdered, & to support them thro the Winter.\"\n\u2014\"Apology of the Paxton Boys\" (pamphlet), 1764 (Note: \"apology\" in this context should be read as an explanation, not an admission of guilt or regret.)\nThe sentiments expressed in the explanation above reflect which of the ongoing tensions during the colonial period of American history?\n(A) Tensions between British policies and the aspirations of North American colonists. (B) Tensions between American Indians allied with the French and those allied with the British. (C) Tensions between freed African Americans and white planters. 
(D) Tensions between backcountry settlers and elites within colonial America.\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. After the French and Indian War, the Scotch-Irish settlers attacked American Indians. After the attacks on the Conestoga, about 250 Paxton Boys presented their grievances to the Pennsylvania legislature. As mentioned in the information, the Paxton Boys cited resentment of local elites. The answer is (D).\n\nQ: This question refers to the following information.\nOur leaders talk about stopping aggression from the north, but this was a struggle among groups of Vietnamese until we intervened. We seem bent upon saving the Vietnamese from Ho Chi Minh even if we have to kill them and demolish their country to do it. As the native people survey bombed-out villages, women and children burned by napalm, rice crops destroyed and cities overrun with our military personnel, they are doubtless saying secretly of the Vietcong guerillas and of the American forces, \"A plague on both your houses.\" \u2026 Stop the bombing, north and south, end search and destroy offensive sweeps, and confine our military action to holding operations on the ground. Bombing the north has failed to halt or seriously check the flow of troops to the south and may, in fact, have prompted a much greater war effort by Hanoi.\n\u2014Senator George McGovern, \"The Lessons of Vietnam,\" April 25, 1967\nWhich of the following opinions from the 1960s most directly reflects the perspective of George McGovern's speech?\n(A) Americans must maximize their technological edge in Vietnam. (B) American bombing in Vietnam is step by step leading to progress in the war. (C) American bombing in Vietnam is a failure. (D) America must not give in to defeatism about the war in Vietnam.\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. \"Stop the bombing\" and \"Bombing the north has failed to halt or seriously check the flow of troops to the south\" indicate that the perspective of George McGovern's speech is that American bombing in Vietnam is a failure. The answer is (C).\n\nQ: This question refers to the following information.\n\"In the new Code of Laws which I suppose it will be necessary for you to make I desire you would Remember the Ladies, and be more generous and favorable to them than your ancestors. Do not put such unlimited power into the hands of the Husbands. Remember all Men would be tyrants if they could. If particular care and attention is not paid to the Ladies we are determined to foment a Rebellion, and will not hold ourselves bound by any Laws in which we have no voice, or Representation.\"\nAbigail Adams, in a letter to John Adams, 1776\n\"Special legislation for woman has placed us in a most anomalous position. Women invested with the rights of citizens in one section\u2014voters, jurors, office-holders\u2014crossing an imaginary line, are subjects in the next. In some States, a married woman may hold property and transact business in her own name; in others, her earnings belong to her husband. In some States, a woman may testify against her husband, sue and be sued in the courts; in others, she has no redress in case of damage to person, property, or character. In case of divorce on account of adultery in the husband, the innocent wife is held to possess no right to children or property, unless by special decree of the court. 
But in no State of the Union has the wife the right to her own person, or to any part of the joint earnings of the co-partnership during the life of her husband. In some States women may enter the law schools and practice in the courts; in others they are forbidden. In some universities girls enjoy equal educational advantages with boys, while many of the proudest institutions in the land deny them admittance, though the sons of China, Japan and Africa are welcomed there. But the privileges already granted in the several States are by no means secure.\"\nSusan B. Anthony, \"Declaration of Rights for Women,\" July 4, 1876\nThe sentiments expressed in the second excerpt by Susan B. Anthony are most likely in support of\n(A) the Equal Rights Amendment (B) universal suffrage (C) states' rights (D) prohibition\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. The information above mentions that women are in an anomalous position in terms of legislation: in some states women's earnings do not belong to themselves, and in others they cannot testify against their husbands. Anthony believes women should have equal legal rights with men. The answer is (B).\n\nQ: This question refers to the following information.\n\"Society in every state is a blessing, but government even in its best state is but a necessary evil; in its worst state an intolerable one; for when we suffer, or are exposed to the same miseries by a government, which we might expect in a country without government, our calamity is heightened by reflecting that we furnish the means by which we suffer. Government, like dress, is the badge of lost innocence; the palaces of kings are built on the ruins of the bowers of paradise. For were the impulses of conscience clear, uniform, and irresistibly obeyed, man would need no other lawgiver; but that not being the case, he finds it necessary to surrender up a part of his property to furnish means for the protection of the rest; and this he is induced to do by the same prudence which in every other case advises him out of two evils to choose the least. Wherefore, security being the true design and end of government, it unanswerably follows that whatever form thereof appears most likely to ensure it to us, with the least expense and greatest benefit, is preferable to all others.\"\nThomas Paine, Common Sense, 1776\nWhich of the following \"miseries\" alluded to above were most condemned by Anti-Federalists of the post-Revolutionary era?\n(A) Organized response to Bacon's Rebellion (B) Federal response to Shays's Rebellion (C) Federal response to the Whiskey Rebellion (D) Federal response to Pontiac's Rebellion\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. Anti-Federalists did not trust centralized government power, and condemned Washington's military response to the Whiskey Rebellion. Bacon's Rebellion and Pontiac's Rebellion happened before the Revolution, so they can be ruled out. The answer is (C).", "high_school_world_history": "The following are multiple choice questions (with answers) about high school world history.\n\nQ: This question refers to the following information.\n\"At least one of the [world's] societies would have to somehow enormously increase its productivity [in order to achieve global hegemony]. That quantum jump would have to be made before the various scientific, technological, agricultural, and industrial revolutions on which our post-quantum-leap world rests. 
It could only be accomplished by exploiting the ecosystems, mineral resources, and human assets of whole continents outside the lands of the society making the jump. Western Europe did just that by means of its brutality and guns and, more important, by geographical and ecological luck.\"\nCopyright \u00a9 2015 Cambridge University Press.\nAlfred Crosby, historian, Ecological Imperialism, 2004\nThe \"quantum jump\" mentioned in the passage most directly contributed to which of the following developments in the period 1450\u20131750 C.E.?\n(A) A breakdown in trade routes through the collapse of the established state structure (B) An increase in the population of the world through more plentiful supplies of food (C) The spread of Chinese and Indian belief systems across the world (D) An increase in social unrest\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The \"quantum jump\" mentioned in the passage refers to the conquest of the New World and the Columbian Exchange, which spread new food crops around the world and made supplies of food more plentiful. Choices (A), (C), and (D) are not developments that followed from it. The answer is (B).\n\nQ: This question refers to the following information.\n\"The struggle against neo-colonialism is not aimed at excluding the capital of the developed world from operating in less developed countries. It is aimed at preventing the financial power of the developed countries being used in such a way as to impoverish the less developed.\nNon-alignment, as practiced by Ghana and many other countries, is based on co-operation with all States whether they be capitalist, socialist or have a mixed economy. Such a policy, therefore, involves foreign investment from capitalist countries, but it must be invested in accordance with a national plan drawn up by the government of the non-aligned State with its own interests in mind. The issue is not what return the foreign investor receives on his investments\u2026The question is one of power. A State in the grip of neo-colonialism is not master of its own destiny.\"\nKwame Nkrumah, Neo-Colonialism, 1965\nWhich of the following provides the best context for Nkrumah's writings?\n(A) The Industrial Revolution (B) Decolonization (C) Regional Free Trade Associations (D) Autarky\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage expresses the concern that the fight against neo-colonialism was in danger of failing and that newly independent nations like Ghana might be re-colonized via the financial power of the developed countries. The answer is (B).\n\nQ: This question refers to the following information.\n\"Indeed, as both the fatwas of distinguished [scholars] who base their opinion on reason and tradition alike and the consensus of the Sunni community agree that the ancient obligation of extirpation, extermination, and expulsion of evil innovation must be the aim of our exalted aspiration, for \"Religious zeal is a victory for the Faith of God the Beneficent\"; then, in accordance with the words of the Prophet (Peace upon him!) 
\"Whosoever introduces evil innovation into our order must be expelled\" and \"Whosoever does aught against our order must be expelled,\" action has become necessary and exigent\u2026\"\nLetter from Ottoman Sultan Selim I to Safavid Shah Ismail I, 1514\nThe letter from Selim I is most clearly an example of which of the following?\n(A) The maintenance of military supremacy at all costs (B) Expanding tensions between religious sects (C) Factors that brought about the collapse of the Ottoman Empire (D) Peacemaking efforts among the Islamic empires\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage is an example of expanding tensions between Selim and Ismail. In the passage the Selim references the fatwa and the consensus of the Sunni community to against whosoever introduces evil. The answer is (B).\n\nQ: This question refers to the following information.\n\"The real grievance of the worker is the insecurity of his existence; he is not sure that he will always have work, he is not sure that he will always be healthy, and he foresees that he will one day be old and unfit to work. If he falls into poverty, even if only through a prolonged illness, he is then completely helpless, exam_ins to his own devices, and society does not currently recognize any real obligation towards him beyond the usual help for the poor, even if he has been working all the time ever so faithfully and diligently. The usual help for the poor, however, leaves a lot to be desired, especially in large cities, where it is very much worse than in the country.\"\nOtto von Bismarck, 1884\nOtto von Bismarck likely made this speech in reaction to which of the following issues?\n(A) Social acceptance of child labor (B) Declining life expectancy in Germany (C) Criticisms of German trade tariffs (D) Negative effects attributed to industrial capitalism\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage talks about the grievance of the work under the industrial capitalism. The answer is (D).\n\nQ: This question refers to the following information.\nHe contains all works and desires and all perfumes and all tastes. He enfolds the whole universe and in silence is loving to all. This is the Spirit that is in my heart, this is Brahman. To him I shall come when I go beyond this life, and to him will come he who has faith and doubts not.\n\u2014The Upanishads, India, c. 1000 BCE\nTo which religion does the speaker most likely belong?\n(A) Hinduism (B) Buddhism (C) Shintoism (D) Zoroastrianism\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. Brahman refers to the ultimate reality of all things in the Hindu religion. In contrast, Buddhism does not have a concept of supreme God. The answer is (A).", "human_aging": "The following are multiple choice questions (with answers) about human aging.\n\nQ: All other things being equal, which of the following persons is more likely to show osteoporosis?\n(A) An older Hispanic American woman (B) An older African American woman (C) An older Asian American woman (D) An older Native American woman\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. Although osteoporosis can occur at any age, the risk is higher for older people. It is most common in Asian and non-Hispanic white women. 
The answer is (C).\n\nQ: The finding that adults tend to remember events from their adolescence better than from other periods in their lives is referred to as the\n(A) Adolescence advantage (B) Reminiscence bump (C) Memorial memorial (D) Quadratic retrieval spike\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. The reminiscence bump is the phenomenon that older adults tend to recollect events from their adolescence and early adulthood. People usually have a period of childhood amnesia from birth to around age 5, and a reminiscence bump between 10 and 30. The answer is (B).\n\nQ: Which element in tobacco smoke is responsible for cancers?\n(A) Nicotine (B) Tar (C) Carbon monoxide (D) Smoke particles\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. The benzene, acrylamide, and acrylonitrile in tar cause DNA mutations in the cells of the lungs, which lead to cancer. The answer is (B).\n\nQ: When older adults move to a new state after retirement, which of the following is the more likely destination?\n(A) Texas (B) California (C) Hawaii (D) Vermont\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. Texas does not have a state income tax and has a low cost of living compared with the other three options. The answer is (A).", "human_sexuality": "The following are multiple choice questions (with answers) about human sexuality.\n\nQ: The most common disorder among men who seek sexual therapy is:\n(A) premature ejaculation (B) inhibited ejaculation (C) erectile disorder (D) ejaculatory disorder\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. The most common type of arousal disorder for men is erectile dysfunction, meaning that a person is unable to get or maintain an erection during sexual activity although he may be interested in it. The answer is (C).\n\nQ: A woman who knows she has active herpes and untreated syphilis but continues to have sex without informing her partners of her condition has, in psychoanalytic terms:\n(A) a strong ego (B) a weak superego (C) a weak id (D) a strong superego\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. A person with a weak superego tends to be delinquent, criminal, or antisocial. The actions of the woman, who knows she has active venereal diseases but still has sex with her partners without informing them, indicate that she may have an antisocial personality. The answer is (B).\n\nQ: The nature of homosexual activities that occur during preadolescence include all but which of the following?\n(A) sexual intercourse (B) circle jerks (C) exhibitionism (D) touching each other's genitals\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. Sexual intercourse does not typically occur during preadolescence. The answer is (A).\n\nQ: Women's ability to have multiple orgasms is primarily due to:\n(A) the fact that they do not have a refractory period. (B) the response of the inner layers of the vagina. (C) having alternating orgasms in different locations. (D) the G-Spot.\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. The refractory period is the time after orgasm during which a person is not able to become erect or is not interested in sex. The answer is (A).\n\nQ: Morning sickness is typically a problem:\n(A) during the first trimester (B) during the second trimester (C) during the third trimester (D) all through the pregnancy\nA: Let's think step by step.
We refer to Wikipedia articles on human sexuality for help. Morning sickness usually begins by nine weeks after conception, corresponding to the first trimester. The answer is (A).", "international_law": "The following are multiple choice questions (with answers) about international law.\n\nQ: How the consent to be bound of a State may be expressed?\n(A) The consent of a State to be bound is expressed only by ratification (B) The consent of a state to be bound by a treaty may be expressed by signature, ratification, acceptance, approval or accession (C) The consent of a State to be bound is expressed by signature (D) The consent of a State to be bound is expressed by whatever means they choose\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Article 11 of the Vienna Convention on the Law of Treaties, signed in 1969, states that \"the consent of a State to be bound by a treaty may be expressed by signature, exchange of instruments constituting a treaty, ratification, acceptance, approval or accession, or by any other means if so agreed.\" (B) is the most precise and accurate answer. The answer is (B).\n\nQ: What is the judge ad hoc?\n(A) If a party to a contentious case before the ICJ does not have a national sitting as judge, it is entitled to nominate someone as a judge solely for that case, with the title of judge ad hoc (B) Judge ad hoc is the member of the bench of the ICJ with a casting vote (C) Judge ad hoc is a surrogate judge, in case a judge is disqualified or passes away (D) Judge ad hoc is the judge that each party will always nominate in every contentious case\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. As \"ad hoc\" implies, a judge ad hoc is appointed only for a specific case or period, when a party to a contentious case before the International Court of Justice does not have a regular national sitting as judge. The answer is (A).\n\nQ: When 'consent' can serve as a circumstance precluding the wrongfulness of a State conduct?\n(A) Consent can serve as a circumstance precluding the wrongfulness whenever it is given (B) Consent can never serve as a circumstance precluding wrongfulness (C) Consent can serve as a circumstance precluding wrongfulness, provided the consent is valid and to the extent that the conduct remains within the limits of the consent given (D) Consent can always serve as a circumstance precluding wrongfulness, no matter which organ of the State gives it\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Valid consent can serve as a circumstance precluding the wrongfulness of a State conduct if the conduct remains within the limits of that consent, according to Chapter V of the Responsibility of States for Internationally Wrongful Acts, 2001, United Nations. The answer is (C).\n\nQ: Would a reservation to the definition of torture in the ICCPR be acceptable in contemporary practice?\n(A) This is an acceptable reservation if the reserving country's legislation employs a different definition (B) This is an unacceptable reservation because it contravenes the object and purpose of the ICCPR (C) This is an unacceptable reservation because the definition of torture in the ICCPR is consistent with customary international law (D) This is an acceptable reservation because under general international law States have the right to enter reservations to treaties\nA: Let's think step by step.
We refer to Wikipedia articles on international law for help. Because it contravenes the object and purpose of the ICCPR, this is an unacceptable reservation in contemporary practice. The answer is (B).\n\nQ: What types of force does Article 2(4) of the UN Charter prohibit?\n(A) Article 2(4) encompasses only armed force (B) Article 2(4) encompasses all types of force, including sanctions (C) Article 2(4) encompasses all interference in the domestic affairs of States (D) Article 2(4) encompasses force directed only against a State's territorial integrity\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Article 2(4) of the UN Charter prohibits states from using armed force in their international relations. The answer is (A).", "jurisprudence": "The following are multiple choice questions (with answers) about jurisprudence.\n\nQ: Iverson Jewelers wrote a letter to Miller, 'We have received an exceptionally fine self winding Rolox watch which we will sell to you at a very favorable price.'\n(A) The letter is an offer to sell (B) A valid offer cannot be made by letter. (C) The letter contains a valid offer which will terminate within a reasonable time. (D) The letter lacks one of the essential elements of an offer.\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. An offer shows the intent to enter into a mutually-beneficial contract with specific terms. An offer can be made by a letter. While this letter indicates the willingness to sell, the lack of specific terms, such as transaction price and offer expiration date, makes it an incomplete offer. The answer is (D).\n\nQ: Functions of the law include all but which of the following?\n(A) maximizing individual freedom (B) providing a basis for compromise (C) keeping the peace (D) promoting the principles of the free enterprise system\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Laws are fundamentally about helping resolve disputes between individuals, and are therefore essential for maximizing individual freedom, providing a basis for compromise, and keeping the peace. The answer is (D).\n\nQ: The ________ School of jurisprudence postulates that the law is based on what is \"correct.\"\n(A) Natural Law (B) Analytical (C) Historical (D) Sociological\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. The Natural Law School of jurisprudence focuses on the laws of nature, and states that the law should be based on ethics, morals, and what is \"correct\". Analytical deals with the law as it already exists, Historical postulates that the law was found and not made, and Sociological studies how the law and society impact each other. The answer is (A).\n\nQ: Which word best summarizes Weber's explanation of the development of formally rational law?\n(A) Authority. (B) Charisma. (C) Co-operation. (D) Capitalism.\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Weber explained the development of formally rational law as part of how modern society moved from tradition to rationality, where people decide actions based less on how things were culturally done and more on expected utility. How rational individuals optimize efficiency of accomplishing tasks for higher rewards is a core principle of Capitalism.
The answer is (D).\n\nQ: Which position does Rawls claim is the least likely to be adopted by the POP (people in the original position)?\n(A) The POP would choose equality above liberty. (B) The POP would opt for the 'maximin' strategy. (C) The POP would opt for the 'difference principle'. (D) The POP would reject the 'system of natural liberty.'\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. The POP would opt for the 'maximin' strategy, opt for the 'difference principle', and reject the 'system of natural liberty', but the POP would not choose equality above liberty, since the original position assumes citizens who are both free and equal. The answer is (A).", "logical_fallacies": "The following are multiple choice questions (with answers) about logical fallacies.\n\nQ: When an arguer causes confusion during refutation because of real or feigned lack of an ability to engage in refutation, that arguer may have committed the fallacy of\n(A) poor sportsmanship (B) appeal to compassion (C) argument against the person (D) ignorance of refutation\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. Ignorance of refutation, one of the logical fallacies on Aristotle's original list in his Organon, occurs when someone causes confusion in an argument through real or feigned inability to engage in refutation, in order to win the argument. The answer is (D).\n\nQ: The complex question fallacy consists of\n(A) arguing something is inferior just because it doesn't do something it was never intended to do. (B) including more than one claim in the proposition and treating proof for one claim as proof for all the claims. (C) drawing a conclusion before examining the evidence, and only considering evidence that supports that conclusion. (D) asking a question that includes either an unproven assumption or more than one question, thus making a straightforward yes or no answer meaningless.\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The complex question fallacy occurs when someone makes a straightforward yes or no answer to a question meaningless by including either an unproven assumption or more than one question. The latter is also known as the many questions fallacy. The answer is (D).\n\nQ: Arguing that what is true of the parts must be true of the whole is the fallacy of...\n(A) Division (B) Composition (C) Appeal to the person (D) Appeal to ignorance\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The fallacy of composition occurs when someone argues that what is true of the parts must be true of the whole. The answer is (B).\n\nQ: Which of the following is true of a valid categorical syllogism?\n(A) The minor premise must deny the antecedent (B) The major premise must affirm the consequent (C) The middle term must be used in at least one premise in a universal or unqualified sense (D) All of the above\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. A valid categorical syllogism must satisfy several conditions: (1) the syllogism must have exactly three terms, (2) every term of the syllogism must be used exactly twice, (3) a term may be used only once in any premise, and (4) the middle term must be used in at least one premise in a universal or unqualified sense, etc. Only (C) is true.
The answer is (C).\n\nQ: If someone attacks the character of an opposing arguer, instead of responding to that opponent's arguments, the first person has probably committed which of the following fallacies?\n(A) tu quoque (B) horse laugh (C) argument against the person (D) ignoratio elenchi\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The argument against the person fallacy occurs when someone irrelevantly attacks the character of an opposing arguer, instead of addressing that opponent's arguments. The answer is (C).", "machine_learning": "The following are multiple choice questions (with answers) about machine learning.\n\nQ: Which image data augmentation is most common for natural images?\n(A) random crop and horizontal flip (B) random crop and vertical flip (C) posterization (D) dithering\nA: Let's think step by step. Data augmentation is used to increase the diversity of images in the training dataset. It is important that natural images are kept natural after being augmented. Vertical flips of images are not natural, so (B) is false. Posterization makes the image look like a poster, and dithering simulates greater color depth. Neither of these preserves the natural property. The only natural data augmentation technique is (A). The answer is (A).\n\nQ: Traditionally, when we have a real-valued input attribute during decision-tree learning we consider a binary split according to whether the attribute is above or below some threshold. Pat suggests that instead we should just have a multiway split with one branch for each of the distinct values of the attribute. From the list below choose the single biggest problem with Pat\u2019s suggestion:\n(A) It is too computationally expensive. (B) It would probably result in a decision tree that scores badly on the training set and a testset. (C) It would probably result in a decision tree that scores well on the training set but badly on a testset. (D) It would probably result in a decision tree that scores well on a testset but badly on a training set.\nA: Let's think step by step. Because the input is real valued, it is unlikely that the same values appear both at training and test time. This means that while such a decision tree could yield good performance on the training data, when evaluated on the test data it will perform badly because the decision tree won\u2019t know what to do with numbers that did not appear in the training data. The answer is (C).\n\nQ: You are reviewing papers for the World\u2019s Fanciest Machine Learning Conference, and you see submissions with the following claims. Which ones would you consider accepting?\n(A) My method achieves a training error lower than all previous methods! (B) My method achieves a test error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise test error.) (C) My method achieves a test error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise cross-validation error.) (D) My method achieves a cross-validation error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise cross-validation error.)\nA: Let's think step by step. In machine learning, we train with some data and fixed hyperparameters and the training error can be arbitrarily low, so (A) can\u2019t be right.
Then, one compares different hyperparameters by selecting the model with the lowest cross-validation error; this means that (B) and (D) are not the right procedure. The only relevant number after these is the test error, and thus (C) is the right answer. The answer is (C).\n\nQ: A 6-sided die is rolled 15 times and the results are: side 1 comes up 0 times; side 2: 1 time; side 3: 2 times; side 4: 3 times; side 5: 4 times; side 6: 5 times. Based on these results, what is the probability of side 3 coming up when using Add-1 Smoothing?\n(A) 2.0/15 (B) 1.0/7 (C) 3.0/16 (D) 1.0/5\nA: Let's think step by step. Add-1 smoothing adds the value of one to the different counts and then normalizes the probabilities accordingly. The counts after adding one will be: side 1 comes up 1 time; side 2: 2 times; side 3: 3 times; side 4: 4 times; side 5: 5 times; side 6: 6 times. The counts then sum to 21, so the probability of rolling a three is 3/21 = 1/7. The answer is (B).\n\nQ: To achieve a 0/1 loss estimate that is less than 1 percent of the true 0/1 loss (with probability 95%), according to Hoeffding's inequality the IID test set must have how many examples?\n(A) around 10 examples (B) around 100 examples (C) between 100 and 500 examples (D) more than 1000 examples\nA: Let's think step by step. By Hoeffding\u2019s inequality, we expect that with 95% probability the in-sample and out-of-sample errors differ by at most epsilon when we have N samples if 2 exp(-2 epsilon^2 N) < 0.05; with epsilon = 0.01, this implies that N > -1/(2 epsilon^2) log(0.05/2) = 5000 log(40). Since log(40) > 1, one needs more than 1000 examples. The answer is (D).", "management": "The following are multiple choice questions (with answers) about management.\n\nQ: How can organisational structures that are characterised by democratic and inclusive styles of management be described?\n(A) Hierarchical (B) Bureaucratic (C) Flat (D) Functional\nA: Let's think step by step. We refer to Wikipedia articles on management for help. Flat organizational structures are characterized by democratic and inclusive styles of management, and have few (if any) levels of management between the workers and managers. The answer is (C).\n\nQ: Hygiene factors are associated with which writer?\n(A) Frederick Hertzberg (B) D.C. McClelland (C) Abraham Maslow (D) Douglas McGregor\nA: Let's think step by step. We refer to Wikipedia articles on management for help. Hygiene factors include compensation, company policies, supervision, interpersonal relations, and work environments. Hertzberg lists them as factors that cannot motivate employees but can minimize job dissatisfaction. The answer is (A).\n\nQ: What characteristic is not a key feature of the 'open systems' model of management?\n(A) Morale (B) Innovation (C) Growth resource (D) Adaptation\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The key characteristics of an open system in management include innovation, growth resource, and adaptation, but do not include morale. The answer is (A).\n\nQ: Which element of the cultural web forms regalia?\n(A) Symbols (B) Rituals and routines (C) Power structures (D) Control systems\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The cultural web is a tool for mapping an organization's culture, where symbols form the regalia that visually expresses the values that the organization holds as important.
The answer is (A).\n\nQ: What are the two main dimensions of the Ohio Studies into leadership?\n(A) Starting position and end position (B) Initial environment and changed environment (C) Organisational structure and conditioning (D) Initiating structure and considerations\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The Ohio State Leadership Studies conducted in the 1940s identified initiating structure and consideration as the two main dimensions of leader behavior. The answer is (D).", "marketing": "The following are multiple choice questions (with answers) about marketing.\n\nQ: Although the content and quality can be as controlled as direct mail, response rates of this medium are lower because of the lack of a personal address mechanism. This media format is known as:\n(A) Care lines. (B) Direct mail. (C) Inserts. (D) Door to door.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Door to door marketing delivers non-addressed items to all buildings within a geographic area. While it can control the content and quality as well as direct mail marketing, its response rate is lower because of the lack of a personal address mechanism. The answer is (D).\n\nQ: In an organization, the group of people tasked with buying decisions is referred to as the _______________.\n(A) Outsourcing unit. (B) Procurement centre. (C) Chief executive unit. (D) Decision-making unit.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. In an organization, the group of people tasked with buying decisions is referred to as the decision-making unit. The answer is (D).\n\nQ: The single group within society that is most vulnerable to reference group influence is:\n(A) The older consumer who feels somewhat left out of things. (B) The married women, many of whom feel a need for stability in their lives. (C) New immigrants who really want to assimilate into their new culture. (D) Children, who base most of their buying decisions on outside influences.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Children, who mostly base their buying decisions on outside influences, are the single group within society most vulnerable to reference group influence. The answer is (D).\n\nQ: Which of the following is an assumption in Maslow's hierarchy of needs?\n(A) Needs are dependent on culture and also on social class. (B) Lower-level needs must be at least partially satisfied before higher needs can affect behaviour. (C) Needs are not prioritized or arranged in any particular order. (D) Satisfied needs are motivators, and new needs emerge when current needs remain unmet.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Maslow's hierarchy of needs, from the bottom upwards, consists of physiological (food and clothing), safety, love and belonging, esteem, and self-actualization needs. Lower-level needs must be at least partially satisfied before higher ones can affect behavior. The answer is (B).\n\nQ: _____________ is a natural outcome when combining demographic and geographic variables.\n(A) Geodemographics (B) Product differentiation. (C) ANSOFF matrix. (D) Brand management.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Geodemographics is a natural outcome when combining demographic and geographic variables.
The answer is (A).", "medical_genetics": "The following are multiple choice questions (with answers) about medical genetics.\n\nQ: The stage of meiosis in which chromosomes pair and cross over is:\n(A) prophase I (B) metaphase I (C) prophase II (D) metaphase II\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Prophase I is the stage of meiosis where homologous chromosomes pair with each other and exchange genetic material. The answer is (A).\n\nQ: DNA ligase is\n(A) an enzyme that joins fragments in normal DNA replication (B) an enzyme of bacterial origin which cuts DNA at defined base sequences (C) an enzyme that facilitates transcription of specific genes (D) an enzyme which limits the level to which a particular nutrient reaches\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. DNA ligase is a type of enzyme (EC 6.5.1.1) responsible for joining DNA strands together by catalyzing a phosphodiester bond. The answer is (A).\n\nQ: Which of the following conditions does not show multifactorial inheritance?\n(A) Pyloric stenosis (B) Schizophrenia (C) Spina bifida (neural tube defects) (D) Marfan syndrome\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Multifactorial inheritance occurs when more than a single factor is responsible for causing a given trait or health problem; genes cannot be the only factor. Marfan syndrome, on the other hand, requires only one abnormal copy of the Marfan gene, from one parent, to inherit the trait. The answer is (D).\n\nQ: A gene showing codominance\n(A) has both alleles independently expressed in the heterozygote (B) has one allele dominant to the other (C) has alleles tightly linked on the same chromosome (D) has alleles expressed at the same time in development\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Codominance, as it relates to genetics, refers to a type of genetic inheritance in which both alleles are independently expressed in the heterozygote. A heterozygote is an individual having two different alleles of a gene. The answer is (A).\n\nQ: Large triplet repeat expansions can be detected by:\n(A) polymerase chain reaction. (B) single strand conformational polymorphism analysis. (C) Southern blotting. (D) Western blotting.\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. A Southern blot is a method in molecular biology for detecting specific DNA sequences in a sample. Large triplet repeat expansions are usually detected with this method. The answer is (C).", "miscellaneous": "The following are multiple choice questions (with answers) about miscellaneous.\n\nQ: Which of these songs was a Top 10 hit for the rock band The Police?\n(A) 'Radio Ga-Ga' (B) 'Ob-la-di Ob-la-da' (C) 'De Do Do Do De Da Da Da' (D) 'In-a-Gadda-Da-Vida'\nA: Let's think step by step. We refer to Wikipedia for help. Radio Ga-Ga is by Queen. Ob-la-di Ob-la-da is by The Beatles. And In-a-Gadda-Da-Vida is by Iron Butterfly. That leaves 'De Do Do Do De Da Da Da' as the only song by The Police, and it was also a Top 10 hit. The answer is (C).\n\nQ: What place is named in the title of the 1979 live album by rock legends Cheap Trick?\n(A) Budapest (B) Budokan (C) Bhutan (D) Britain\nA: Let's think step by step. We refer to Wikipedia for help. Nippon Budokan is an indoor arena in Tokyo, Japan renowned for hosting rock music concerts, including Cheap Trick in 1978.
'Cheap Trick at Budokan' became the name of their album. The answer is (B).\n\nQ: What is produced during photosynthesis?\n(A) hydrogen (B) nylon (C) oxygen (D) light\nA: Let's think step by step. We refer to Wikipedia for help. Photosynthesis is the process in which green plants use the green pigment chlorophyll to synthesize food from water and carbon dioxide. Oxygen is a byproduct of this process. The answer is (C).\n\nQ: Who is the shortest man to ever win an NBA slam dunk competition?\n(A) Anthony 'Spud' Webb (B) Michael 'Air' Jordan (C) Tyrone 'Muggsy' Bogues (D) Julius 'Dr J' Erving\nA: Let's think step by step. We refer to Wikipedia for help. In 1986, Spud Webb, standing only 5'7\", became the shortest NBA player in history to win an official slam dunk contest. The answer is (A).\n\nQ: How many axles does a standard automobile have?\n(A) one (B) two (C) four (D) eight\nA: Let's think step by step. We refer to Wikipedia for help. Most cars have two axles that rotate the wheels. The answer is (B).", "moral_disputes": "The following are multiple choice questions (with answers) about moral disputes.\n\nQ: Baron admits that the versions of the ticking bomb hypothetical she discusses are \"stunningly stupid,\" but she claims this is actually evidence of\n(A) the stupidity of most traditional philosophical examples. (B) a general lack of intelligence among people with advanced degrees. (C) the wrongness of torture. (D) the readiness on the part of many intelligent people to see torture as the best solution to deal with terrorism.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. The ticking bomb hypothetical poses a problem where many people will die from an exploding bomb if the hypothetical terrorist does not disclose how to defuse it. Baron sees this hypothetical as silly, but its prevalence does suggest that intelligent people, particularly utilitarians, see torture as justifiable to save lives in this scenario. The answer is (D).\n\nQ: A fertilized ovum is also known as\n(A) a zygote. (B) an embryo. (C) a viability. (D) a blastocyst.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. Once a single sperm penetrates the layers of an egg to form a new cell, that cell is called a zygote. The answer is (A).\n\nQ: Pence compares six different cases of reproduction, from natural twinning to SCNT. What conclusion does he draw from this comparison?\n(A) SCNT is not a different kind of reproduction because there are no morally relevant differences between it and other permissible means of reproduction. (B) Because there is a low risk of harm for natural twinning, there will be a low risk of harm for SCNT. (C) both A and B (D) neither A nor B\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. SCNT, known as 'somatic cell nuclear transfer', is a technique in which the nucleus of a somatic (body) cell is transferred to the cytoplasm of an enucleated egg. This is used in farm animal cloning. Pence compares this to other processes of reproduction and sees little moral separation. The answer is (A).\n\nQ: Which of the following is an intrinsic good, according to Singer?\n(A) being in the upper class (B) the ability to learn and solve problems (C) being more beautiful than most of the population (D) being shorter than average\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help.
Bioethicist Peter Singer sees intrinsic goods as things that are valuable in and of themselves. Attributes such as being beautiful, short, or upper class may be valued by some, but capabilities such as the ability to learn and solve problems are intrinsically good. The answer is (B).\n\nQ: According to Metz, what is wrong with consequentialist arguments against capital punishment based on African values?\n(A) It is unclear as of yet whether or not capital punishment deters harm to the community. (B) It is unclear as of yet whether or not capital punishment deters harm to any individuals. (C) Consequentialism is not supported by African values. (D) Even though consequentialism is supported by African values, no consequentialist arguments framed in terms of African values have been offered.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. Thaddeus Metz is a humanities research professor focusing on ethical philosophy in South Africa. Metz has written that the death penalty is unjustified, as it treats individuals as incapable of communal relations. It is unclear whether capital punishment benefits, or deters harm to, the community. The answer is (A).", "moral_scenarios": "The following are multiple choice questions (with answers) about moral scenarios.\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I took his wallet after he harassed me at the bar. Scenario 2 | I made love to my newborn baby boy.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Stealing from someone is not considered an ethical response to harassment. And having sex with a minor is egregiously unethical. The answer is (A).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I asked for a cigarette from the guy outside the store. Scenario 2 | When the security guard left his post, I pocketed a rare coin.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Asking for something is not morally wrong. Stealing is morally wrong. The answer is (C).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | During the house's demolition, I pushed him into a wall. Scenario 2 | When I choose a babysitter for my twin nieces, I don't care about her racial background.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Physical violence is morally wrong. Being open to hiring people from different racial backgrounds is an expected moral stance. The answer is (B).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I bought an old pick-up truck for work.
Scenario 2 | I looked up at the gorgeous waterfall and threw a rock at it.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Neither of these is wrong, as no one is wronged by buying a pick-up truck or throwing a rock at a waterfall. The answer is (D).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I really loved my newborn baby boy. Scenario 2 | The new girl is embarrassed to live in a trailer park, so I invite the class to her house.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Loving someone is not wrong. However, exposing something that someone is embarrassed about could be considered quite mean. The answer is (C).", "nutrition": "The following are multiple choice questions (with answers) about nutrition.\n\nQ: What is the first-line drug for patients with type 2 diabetes and obesity, as of 2020?\n(A) Acarbose (B) Metformin (C) Sulphonylureas (D) Insulin\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. Metformin (Fortamet, Glumetza, or others) is usually the first medication prescribed for type 2 diabetes, as well as obesity. It works by lowering glucose production in the liver and improving the body's sensitivity to insulin. The answer is (B).\n\nQ: Which of the following statements is correct (according to knowledge in 2020)?\n(A) Consumers with phenylketonuria must avoid the consumption of the sweetener aspartame (B) Consumers with phenylketonuria must avoid the consumption of the sweetener saccharin (C) Consumers with phenylketonuria must avoid the consumption of the sweetener sucralose (D) Consumers with phenylketonuria must avoid the consumption of the sweetener acesulfame K\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. People with phenylketonuria (PKU) cannot break down the amino acid phenylalanine. As it builds up in the blood and brain, it can lead to brain damage. People with PKU should avoid foods and sweeteners that are broken down into phenylalanine in the body, such as aspartame. The answer is (A).\n\nQ: Which of the following statements about iodine is correct, as of 2020?\n(A) 50% of adults consume iodine at levels below the RNI (B) Dairy products are a poor source of iodine (C) The iodine content of organic milk is generally lower than the level in non-organic milk (D) UK dietary reference values recommend an increase in iodine intake in pregnancy\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. Organic milk usually has a lower iodine content than non-organic milk. The answer is (C).\n\nQ: Which of the following is the most plausible explanation for the protective effect of dietary fibre against cancer of the colon, as of 2020?\n(A) Propionic acid, formed during colonic fibre fermentation inhibits liver fatty acid synthesis (B) Butyric acid, formed during colonic fibre fermentation stimulates \"silencing\" of the SLC5A8 tumour suppressor gene (C) None of these options are correct (D) Butyric acid, formed during colonic fibre fermentation stimulates anti-oxidant defences in the colon\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help.
Dietary fibre intake is inversely associated with the risk of colorectal cancer. This is presumed to be because butyric acid (BA), formed during colonic fibre fermentation, stimulates antioxidant defences which help protect the colon from cancerous tumors. The answer is (D).\n\nQ: In a cohort study, the risk ratio of developing diabetes was 0.86 when comparing consumers of tea (the exposed) to those who did not drink tea (the unexposed). Which one statement is correct (according to knowledge in 2020)?\n(A) The tea drinkers have lower risk of developing diabetes. (B) The tea drinkers have higher risk of developing diabetes. (C) Based on the information given we cannot tell if the observed difference in disease risk is the result of chance. (D) The risk ratio is close to the value one, so there is no difference in disease risk between the two groups.\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. The risk ratio alone is not sufficiently far from one to rule out random chance, and without a confidence interval or the study's sample size we cannot tell. The answer is (C).", "philosophy": "The following are multiple choice questions (with answers) about philosophy.\n\nQ: The study of reality in the broadest sense, an inquiry into the elemental nature of the universe and the things in it, is known as _____.\n(A) metaphysics (B) epistemology (C) quantum physics (D) axiology\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Among the options, only metaphysics studies the nature of reality and existence. The answer is (A).\n\nQ: According to Moore\u2019s \u201cideal utilitarianism,\u201d the right action is the one that brings about the greatest amount of:\n(A) pleasure. (B) happiness. (C) good. (D) virtue.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Moore's \"ideal utilitarianism\" states that one's actions should maximize intrinsic goods. The answer is (C).\n\nQ: Before Tolstoy's Christian conversion, what was his perspective on the meaning of life?\n(A) optimist (B) satisfied (C) nominally religious (D) pessimist\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Before his conversion, Tolstoy felt that life was meaningless, which is a pessimist's point of view. The answer is (D).\n\nQ: According to d'Holbach, people always act according to _____.\n(A) free choices (B) dictates of the soul (C) necessary natural laws (D) undetermined will\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. d'Holbach believed that people always act according to necessary natural laws, which leaves no room for free will. The answer is (C).\n\nQ: Psychological egoism is:\n(A) an ethical theory about how we ought to behave. (B) a generalization concerning the way people tend to behave. (C) a claim about human nature and the ways people are capable of behaving. (D) none of the above.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Psychological egoism suggests that one behaves based on what makes one feel good, hence it is a claim about human nature and how humans are capable of behaving. The answer is (C).", "prehistory": "The following are multiple choice questions (with answers) about prehistory.\n\nQ: What is the approximate mean cranial capacity of Homo erectus?\n(A) under 650 cc (B) about 800 cc (C) just under 1000 cc (D) 1200 cc\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. The average cranial capacity of Homo erectus is just under 1000 cubic cm.
The answer is (C).\n\nQ: According to Timothy Pauketat, the evidence for social stratification and political power at Cahokia suggests:\n(A) a center of Mississippian civilization with conditions similar to the rise of early states. (B) the limitations of authority in a Native American society of egalitarian foragers. (C) a simple chiefdom or perhaps a complex chiefdom had evolved by A.D. 1500. (D) a center of Mississippian civilization with conditions similar to societies on the Northwest Coast of North America.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Timothy Pauketat is known for his research on Cahokia, the center of the Mississippian culture, where he found conditions similar to the rise of early states. The answer is (A).\n\nQ: Recent research on hominid species dating from the Middle Pliocene indicates there was (as of 2020):\n(A) a great amount of species diversity, or a single species that exhibited a lot of diversity. (B) very little species diversity during this period and very few hominids. (C) decreased species diversity due to a prolonged ice age followed by a severe drought. (D) decreased species diversity but increased numbers of hammerstones and flakes, indicating stone tool manufacture.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Recent research has recognized multiple hominid species from the Middle Pliocene, meaning that there was a great amount of species diversity, or diversity within a single species. The answer is (A).\n\nQ: Researchers now believe that the decline of the Maya was caused chiefly by:\n(A) a cataclysm of some kind, such as an earthquake, volcano, or tsunami. (B) ecological degradation resulting from slash-and-burn farming techniques. (C) endless wars between neighboring Mayan city-states. (D) practices of interbreeding that led to a steep rise in congenital disorders.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Researchers believe that the Maya collapse was mainly caused by over-exploitation of natural resources through practices such as slash-and-burn farming. The answer is (B).\n\nQ: The great Mayan king Pacal built temples in the city of Palenque in order to:\n(A) satisfy the powerful Mayan astronomer priests. (B) display his generosity to the common people, since they were allowed to live in the temples. (C) frighten away enemies, in particular the Spaniards. (D) legitimize his kingship, since his father was not royal.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Pacal built the temples as funerary monuments to legitimize his kingship, since his father was not royal. The answer is (D).", "professional_accounting": "The following are multiple choice questions (with answers) about professional accounting.\n\nQ: An auditor traces the serial numbers on equipment to a nonissuer\u2019s subledger. Which of the following management assertions is supported by this test?\n(A) Valuation and allocation (B) Completeness (C) Rights and obligations (D) Presentation and disclosure\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The completeness assertion is tested by tracing supporting documents to the record entries. The answer is (B).\n\nQ: One hundred years ago, your great-great-grandmother invested $100 at 5% yearly interest. What is the investment worth today?\n(A) $13,000 (B) $600 (C) $15,000 (D) $28,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help.
A $100 investment at 5% yearly interest is worth 100*(1.05)^100 = $13,150 after 100 years, which is around $13,000. The answer is (A).\n\nQ: On January 1, year 1, Alpha Co. signed an annual maintenance agreement with a software provider for $15,000 and the maintenance period begins on March 1, year 1. Alpha also incurred $5,000 of costs on January 1, year 1, related to software modification requests that will increase the functionality of the software. Alpha depreciates and amortizes its computer and software assets over five years using the straight-line method. What amount is the total expense that Alpha should recognize related to the maintenance agreement and the software modifications for the year ended December 31, year 1?\n(A) $5,000 (B) $13,500 (C) $16,000 (D) $20,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The maintenance period begins on March 1, so only 10 months of expenses should be recognized, which is $15,000/12*10 = $12,500. The software modification cost is amortized over 5 years, so each year is $5,000/5 = $1,000. So the total expense is $12,500 + $1,000 = $13,500. The answer is (B).\n\nQ: Krete is an unmarried taxpayer with income exclusively from wages. By December 31, year 1, Krete's employer has withheld $16,000 in federal income taxes and Krete has made no estimated tax payments. On April 15, year 2, Krete timely filed for an extension request to file her individual tax return, and paid $300 of additional taxes. Krete's year 1 tax liability was $16,500 when she timely filed her return on April 30, year 2, and paid the remaining tax liability balance. What amount would be subject to the penalty for underpayment of estimated taxes?\n(A) $0 (B) $500 (C) $1,650 (D) $16,500\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The tax due after withholding is $16,500 - $16,000 = $500, which is less than $1,000, hence there is no penalty for underpayment of estimated taxes. The answer is (A).\n\nQ: Box a nongovernmental not-for-profit organization had the following transactions during the year: Proceeds from sale of investments $80000 Purchase of property plant and equipment $10000 Proceeds from long-term debt $100000 Loss on sale of investment $5000 What amount should be reported as net cash provided by financing activities in Box's statement of cash flows?\n(A) $70,000 (B) $75,000 (C) $80,000 (D) 100000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. Among the four transactions, only the proceeds from long-term debt belong to the financing activities section of the statement of cash flows, hence the amount reported should be $100,000. The answer is (D).", "professional_law": "The following are multiple choice questions (with answers) about professional law.\n\nQ: A son owed a creditor $5,000. The son's father contacted the creditor and told him that he wanted to pay the son's debt. The father signed a document that stated the father would pay the son's debt at a rate of $500 a month for 10 months. The creditor made no written or oral commitment to forbear to sue the son to collect the $5,000 debt, and the father made no oral or written request for any such forbearance. For the next five months, the father made and the creditor accepted the $500 monthly payments as agreed. During that period, the creditor, in fact, did forbear to take any legal action against the son. However, the father then informed the creditor that he would make no further payments on the debt.
Which of the following is the most persuasive argument that the father is liable to the creditor under the terms of their agreement?\n(A) The father's promise and the creditor's reliance thereon, if proved, gave rise to a valid claim by the creditor against the father based on the doctrine of promissory estoppel. (B) Because it was foreseeable that the father's promise would induce the creditor to forbear taking any action against the son, such forbearance was, as a matter of law, a bargained-for consideration for the father's promise. (C) The father's five payments to the creditor totaling $2,500 manifested a serious intent on the father's part to be contractually bound, and such manifestation is generally recognized as an effective substitute for consideration. (D) By assuming the antecedent debt obligation that the son owed to the creditor, the father became a surety whose promise to the creditor was enforceable, since it was in writing and supported by adequate consideration.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The doctrine of promissory estoppel prevents a person from going back on a promise in contract law, hence option (A) is the most persuasive argument. The answer is (A).\n\nQ: A state has recently enacted a statute prohibiting the disposal of any nuclear wastes within the state. This law does not contravene or conflict with any federal statutes. A man operates a company in the state that is engaged in the disposal of nuclear wastes. Subsequent to the passage of the state statute, the man, not yet aware of the new law, entered into contracts with many out-of-state firms to dispose of their nuclear wastes in the state. On account of this new law, however, the man will be unable to perform these contracts. Assume that the man has standing to challenge this state law. Which of the following presents his strongest constitutional grounds to challenge the state law prohibiting the disposal of nuclear wastes within the state?\n(A) The commerce clause. (B) The equal protection clause of the Fourteenth Amendment. (C) The privileges and immunities clause of Article IV, Section 2. (D) The contract clause.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The commerce clause states that Congress shall have the power to regulate commerce with foreign Nations, and among the several States, and with the Indian Tribes. The statute affects interstate commerce, which puts it into question. Hence the man's strongest argument should be the commerce clause. The answer is (A).\n\nQ: On October 1, 1980, a developer, owner of several hundred acres in a rural county, drafted a general development plan for the area. The duly recorded plan imposed elaborate limitations and restrictions upon the land in the plan, which was to be developed as a residential district. The restrictions were to extend to all persons acquiring any of the lots and to their heirs, assigns, and lessees. It was further provided that all subsequent owners would be charged with due notice of the restrictions. Among those restrictions in the general plan were the following: (22) A franchise right is created in a strip of land 10 feet in width along the rear of each lot for the use of public utility companies with right of ingress and egress. (23) No house or structure of any kind shall be built on the aforementioned strip of land running through the said blocks.
In 2000, a retiree purchased one of the lots, built a house, and erected a fence in the rear of his property within the restricted area. In 2004, a teacher purchased a lot adjacent to the retiree's property and built a new house. Two years later, a librarian purchased the lot that adjoined the teacher's property. The three deeds to those properties each contained references to the deed book where the general plan was recorded. In 2008, the librarian began the construction of a seven-foot post-and-rail fence along the line dividing his lot with the teacher's, and along the center of the area subject to the franchise right. Although the teacher objected to its construction, the fence was completed. If the teacher seeks a mandatory injunction to compel removal of the librarian's fence, the court will most likely\n(A) grant relief, because the fence was in violation of the easement restriction. (B) grant relief, because the encroachment of the fence violated the restriction in the original plan. (C) deny relief, because the teacher failed to enforce the restriction against the retiree. (D) deny relief, because the fence would not be construed as \"a structure\" within the terms of the restriction.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The restrictions in the original plan say that no house or structure of any kind shall be built on the aforementioned strip of land running through the said blocks. Hence the court will most likely grant relief because the fence violated the restriction in the original plan. The answer is (B).\n\nQ: Judge took judicial notice of some facts at the beginning of the trial. Which of the following is not an appropriate kind of fact for judicial notice?\n(A) Indisputable facts. (B) Facts that have been asserted by individual political organizations. (C) Facts recognized to be true by common knowledge. (D) Facts capable of scientific verification.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. Among the options, facts that have been asserted by individual political organizations are not an appropriate kind of fact for judicial notice. The answer is (B).\n\nQ: A state legislature has recently enacted a statute making it a misdemeanor to curse or revile or use obscene or opprobrious language toward or in reference to a police officer performing his duties. A student at a state university organized a demonstration on campus to protest the war. The rally was attended by a group of 50 students who shouted anti-war messages at cars passing by. To show his contempt for the United States, the student sewed the American flag to the rear of his jeans. When a police officer saw the flag sewn on the student's jeans, he approached and told him to remove the flag or he would be placed under arrest. The student became angered and shouted at the police officer, \"Listen, you bastard, I'll wear this rag anywhere I please. \" The student was subsequently placed under arrest and charged with violating the state statute. The student subsequently brings suit in state court challenging the constitutionality of the statute. The strongest constitutional argument for the student is that\n(A) the statute is void for vagueness under the Fourteenth Amendment's due process clause. (B) the statute is invalid because it violates the petitioner's freedom of speech under the First Amendment.
(C) the statute is an abridgment of freedom of speech under the First Amendment because less restrictive means are available for achieving the same purpose. (D) the statute is overbroad and consequently invalid under the First and Fourteenth Amendments.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The Fourteenth Amendment further supports the First Amendment by establishing a due process clause. Hence the strongest argument is that the statute is overbroad and consequently invalid under the First and Fourteenth Amendments. The answer is (D).", "professional_medicine": "The following are multiple choice questions (with answers) about professional medicine.\n\nQ: A 22-year-old male marathon runner presents to the office with the complaint of right-sided rib pain when he runs long distances. Physical examination reveals normal heart and lung findings and an exhalation dysfunction at ribs\u00a04-5 on the right. Which of the following muscles or muscle groups will be most useful in correcting this dysfunction utilizing a direct method?\n(A) anterior scalene (B) latissimus dorsi (C) pectoralis minor (D) quadratus lumborum\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Among the options, only the pectoralis minor muscle originates from the outer surfaces of the 3rd to 5th ribs. The answer is (C).\n\nQ: A 36-year-old male presents to the office with a\u00a03-week\u00a0history of low back pain. He denies any recent trauma but says that he climbs in and out of his truck numerous times a day for his job. Examination of the patient in the prone position reveals a deep sacral sulcus on the left, a posterior inferior lateral angle on the right, and a lumbosacral junction that springs freely on compression. The most likely diagnosis is\n(A) left-on-left sacral torsion (B) left-on-right sacral torsion (C) right unilateral sacral flexion (D) right-on-right sacral torsion\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The deep sulcus on the left and the posterior ILA on the right, with a negative spring test, suggest a right-on-right sacral torsion. All other options have a deep sulcus on the right. The answer is (D).\n\nQ: A 44-year-old man comes to the office because of a 3-day history of sore throat, nonproductive cough, runny nose, and frontal headache. He says the headache is worse in the morning and ibuprofen does provide some relief. He has not had shortness of breath. Medical history is unremarkable. He takes no medications other than the ibuprofen for pain. Vital signs are temperature 37.4\u00b0C (99.4\u00b0F), pulse 88/min, respirations 18/min, and blood pressure 120/84 mm Hg. Examination of the nares shows erythematous mucous membranes. Examination of the throat shows erythema and follicular lymphoid hyperplasia on the posterior oropharynx. There is no palpable cervical adenopathy. Lungs are clear to auscultation. Which of the following is the most likely cause of this patient's symptoms?\n(A) Allergic rhinitis (B) Epstein-Barr virus (C) Mycoplasma pneumonia (D) Rhinovirus\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The symptoms, especially the headache, suggest that the most likely cause is Rhinovirus. Epstein-Barr virus would cause swollen lymph nodes, but there is no palpable cervical adenopathy. That the lungs are clear to auscultation suggests it is not Mycoplasma pneumonia.
The answer is (D).\n\nQ: A previously healthy 32-year-old woman comes to the physician 8 months after her husband was killed in a car crash. Since that time, she has had a decreased appetite and difficulty falling asleep. She states that she is often sad and cries frequently. She has been rechecking the door lock five times before leaving her house and has to count exactly five pieces of toilet paper before she uses it. She says that she has always been a perfectionist but these urges and rituals are new. Pharmacotherapy should be targeted to which of the following neurotransmitters?\n(A) Dopamine (B) Glutamate (C) Norepinephrine (D) Serotonin\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The patient feels sad and among the options, only Dopamine and Serotonin can help increase positive emotions. Serotonin also affects digestion and metabolism, which can help the patient's decreased appetite and sleep difficulty. The answer is (D).\n\nQ: A 42-year-old man comes to the office for preoperative evaluation prior to undergoing adrenalectomy scheduled in 2 weeks. One month ago, he received care in the emergency department for pain over his right flank following a motor vehicle collision. At that time, blood pressure was 160/100 mm Hg and CT scan of the abdomen showed an incidental 10-cm left adrenal mass. Results of laboratory studies, including complete blood count, serum electrolyte concentrations, and liver function tests, were within the reference ranges. The patient otherwise had been healthy and had never been told that he had elevated blood pressure. He takes no medications. A follow-up visit in the office 2 weeks ago disclosed elevated urinary normetanephrine and metanephrine and plasma aldosterone concentrations. The patient was referred to a surgeon, who recommended the adrenalectomy. Today, vital signs are temperature 36.6\u00b0C (97.9\u00b0F), pulse 100/min, respirations 14/min, and blood pressure 170/95 mm Hg. Physical examination discloses no significant findings. Initial preoperative preparation should include treatment with which of the following?\n(A) Labetalol (B) A loading dose of potassium chloride (C) Nifedipine (D) Phenoxybenzamine\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The symptoms and the adrenal mass suggest pheochromocytoma, and the blood pressure indicates hypertension. Phenoxybenzamine is used to treat hypertension caused by pheochromocytoma. The answer is (D).", "professional_psychology": "The following are multiple choice questions (with answers) about professional psychology.\n\nQ: In the construction of a multiple regression equation for purposes of prediction, the optimal combination of measures is one in which the predictors\n(A) are uncorrelated with each other but are moderately correlated with the criterion (B) have low correlations with each other and low correlations with the criterion (C) are highly intercorrelated with each other and moderately correlated with the criterion (D) have low correlations with the criterion but are moderately correlated with each other\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The basis of multiple regression is to assess the relationship between one continuous variable and a set of independent variables. So the predictors should be uncorrelated with each other but moderately correlated with the criterion. 
The answer is (A).\n\nQ: There are three ways to measure the Central Tendency: the Mean, the Median and the Mode. From your knowledge about them, what is the mode?\n(A) less sensitive to extreme scores than the mean (B) more useful for skewed distributions (C) sensitive to extreme values and highly skewed distributions (D) the most frequently occurring number\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The definition of mode is the most frequently occurring number. The answer is (D).\n\nQ: Carl Jung believed that a client's transference:\n(A) is a fantasy that distracts the client from reality. (B) represents \u201cmixed feelings\u201d toward the therapist. (C) is a form of \"acting out.\" (D) reflects the client\u2019s personal and collective unconscious.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Transference is a phenomenon in which a person's feelings are unconsciously redirected, so it reflects the client's personal and collective unconscious. The answer is (D).\n\nQ: In terms of Hofstede\u2019s (1980) five cultural dimensions, the United States scores at the top of the scale on:\n(A) individualism. (B) individualism and power distance. (C) power distance and masculinity. (D) uncertainty avoidance.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. US scores highest on individualism among the five cultural dimensions. The answer is (A).\n\nQ: One of your therapy clients asks your advice about a good weight-reduction program. You have investigated the programs in the community and are enrolled in the one you consider the best. This program offers a $50 bonus to its patrons for each new person they bring into the program. Under these circumstances, your most appropriate response would be to\n(A) tell your client the pros and cons of each program you know about except for the one in which you are enrolled (B) recommend to your client the program in which you are enrolled and explain the $50 bonus you will receive (C) recommend to your client the program in which you are enrolled and offer to have the $50 bonus credited to your client's account in the program (D) tell your client the pros and cons of each program you know about, but do not claim the $50 bonus if your client enrolls in your program\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Based on the circumstances, you should tell your client about the pros and cons of each program, but it would be inappropriate to receive the bonus, so you should not claim the $50 bonus. The answer is (D).", "public_relations": "The following are multiple choice questions (with answers) about public relations.\n\nQ: Earth Hour was a campaign launched by which organization?\n(A) Greenpeace (B) The UN (C) Oxfam (D) World Wildlife Fund\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. Earth Hour is a worldwide movement launched by the World Wildlife Fund. The answer is (D).\n\nQ: In issues management, what is the most proactive approach to addressing negative or misleading information posted online about your organization?\n(A) Buy domain names that could be used by opposition groups. (B) Post anonymous comments on blogs to combat this information. (C) Prepare a news release that discredits the inaccurate information. (D) Make policy changes to address complaints highlighted on these sites.\nA: Let's think step by step. 
We refer to Wikipedia articles on public relations for help. In issues management, the most proactive approach to addressing negative or misleading information posted online is to make policy changes to address complaints highlighted on those sites. The answer is (D).\n\nQ: At which stage in the planning process would a situation analysis be carried out?\n(A) Defining the program (B) Planning the program (C) Taking action and implementing ideas (D) Evaluation of the program\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. Situation analyses are typically carried out during the planning process stage of defining the program. The answer is (A).\n\nQ: Which of these statements is true of the Vatican in 2010 at the time of the accusations of child abuse cover-ups?\n(A) There was a coordinated media response. (B) Consistent messages were communicated. (C) Criticisms were taken as attacks on the Catholic Church. (D) The credibility of the Vatican was upheld.\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. In 2010 when there were accusations of child abuse cover-ups, the Vatican took those criticisms as attacks on the Catholic Church. The answer is (C).\n\nQ: What should a public relations media practitioner do if she does not know the answer to a reporter's question?\n(A) Give the reporter other information she is certain is correct. (B) Say that the information is 'off the record' and will be disseminated later. (C) Say 'I don't know' and promise to provide the information later. (D) Say 'no comment,' rather than appear uninformed.\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. If a public relations media practitioner does not know the answer to a reporter's question, they should say 'I don't know' and offer to provide the information later. The answer is (C).", "security_studies": "The following are multiple choice questions (with answers) about security studies.\n\nQ: What are the frameworks of analysis within which terrorism has been considered (as of 2020)?\n(A) Competition between larger nations has resulted in some countries actively supporting terrorist groups to undermine the strength of rival states. Terrorist networks are extended patronage clubs maintained and paid for by their donor states and are conceptualised as being like state actors, to be dealt with using military force. (B) Globalization has enabled the internationalization of terrorist activities by opening up their operational space, although coordination is still managed from a geographical base. This suggests that terrorist groups are nationally structured which means that terrorism cannot be considered in terms of a war to be defeated militarily without having serious implications on the indigenous population. (C) Terrorism can be viewed as a problem to be resolved by military means (war on terrorism), by normal police techniques (terrorism as crime), or as a medical problem with underlying causes and symptoms (terrorism as disease). (D) Terrorism is viewed as a criminal problem. The criminalization of terrorism has two important implications. Firstly, it suggests that terrorism can be eradicated - terrorists can be caught and brought to trial by normal judicial proceedings thereby removing the threat from society - and secondly, it suggests that preventative crime techniques are applicable to prevent its development.\nA: Let's think step by step. 
We refer to Wikipedia articles on security studies for help. (A) is wrong because it is not competition between larger nations that causes terrorism. \n(B) is wrong because globalization is not the cause of terrorism.\n(C) is correct because the US undertook the war on terrorism. \n(D) is wrong because preventative crime techniques will likely not end terrorism. The answer is (C).\n\nQ: Which of the following is the best lens through which to investigate the role of child soldiers?\n(A) Child soldiers are victims of combat that need re-education and rehabilitation. (B) Children and their mothers are not active subjects in warfare and are best considered as subjects in the private sphere. (C) Children are most often innocent bystanders in war and are best used as signifiers of peace. (D) Children have political subjecthood that is missed when they are considered as passive victims of warfare.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Child soldiers as a political topic can be missed when they are considered passive victims of warfare. The answer is (D).\n\nQ: How can we best describe the relationship between the state-centric approach and the concept of human security?\n(A) There are such wide divisions within the human security framework regarding the nature of threats and referent objects that no widely applicable comparisons between state-centric approaches and human security can be drawn. (B) By adopting the framework of human security, the limitations of the realist state-centric approach become evident. Whilst human security defines the referent object as the person or population, state-centric approaches prioritise the security of the state, de-prioritizing the pursuit of human security. (C) The state-centric approach to security is a faction of human security, usually defined within the broad school of human security. By being state-centric this approach prioritises the individual as the referent object in security studies. (D) Both the state-centric and human-centric approaches to security are mutually exclusive and offer a sufficient analytic framework with which to understand the international security system. It is therefore the role of security analysts to determine which of these substantial concepts is correct, and which should be discarded.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Human security focuses on a person or population whereas state-centric approaches focus on the state while deprioritizing human security. The answer is (B).\n\nQ: In order to become securitized, a threat must be presented in which of these ways?\n(A) As an existential threat that requires immediate and extraordinary action, posing a threat to the survival of the state or to societal security. (B) As requiring immediate and extraordinary action by the state, threatening the survival of a referent object and therefore warranting the use of measures not normally employed in the political realm. (C) As an urgent threat to the survival of the referent object, so serious that it legitimises the employment of extraordinary action in response. (D) As an urgent threat to the survival of the audience that requires extraordinary or emergency measures.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. To be securitized, a threat must be an urgent threat to the survival of the referent object. 
The answer is (C).\n\nQ: What distinguishes coercive diplomacy from military force?\n(A) Compellence is another term for coercive diplomacy, but covering a narrower set of criteria; compellence covers those threats aimed at initiating adversary action. A threat to coerce a state to give up part of its territory would count as coercive diplomacy, as long as that threat proactively initiates action before reactive diplomacy is taken. (B) Coercive diplomacy constitutes the threats of limited force to induce adversary's incentive to comply with the coercer's demands. It is an influence strategy that is intended to obtain compliance: the use of force to defeat an opponent first does not count. It leaves an element of choice with the target to comply, or to continue. (C) Military force, or the threat of military force, utilises fear to achieve strategic objectives. Coercive diplomacy is differentiated from this approach, because it does not use fear as a tool for coercing an adversary. (D) Coercive diplomacy is employed to use force but to limit its effects on the international community. Coercive diplomacy is an aggressive strategy that is intended to obtain compliance through defeat. It does not leave an element of choice with the target, the target either being forced to comply or engage in conflict. It seeks to control by imposing compliance by removing any opportunity for negotiation or concession.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Coercive diplomacy uses the threat of force to induce the opponent to comply with demands. The answer is (B).", "sociology": "The following are multiple choice questions (with answers) about sociology.\n\nQ: Which of the following is not a problem associated with official statistics on strike action?\n(A) most strikes go unnoticed by employers and the mass media (B) not all industrial disputes will be reported by the employer (C) the definition of strikes excludes those that involve fewer than ten workers or last less than one day (D) it is hard to compare strikes that were measured in different ways\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Official statistics on strike action can be problematic because not all industrial disputes will be reported by employers, the definition of strikes excludes those that involve fewer than ten workers or last less than one day, and it is hard to compare strikes that were measured in different ways. Thus, (A) is not a problem associated with official statistics on strike action. The answer is (A).\n\nQ: What does Berger (1963) describe as a metaphor for social reality?\n(A) a fairground ride (B) a circus (C) a puppet theatre (D) a ballet\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Berger describes social reality using the metaphor of a puppet theatre. The answer is (C).\n\nQ: The term 'hegemony' refers to:\n(A) the tendency for the working class not to realize their own interests (B) a dominant ideology that legitimates economic, political and cultural power (C) a form of dual consciousness based on ideology and everyday experiences (D) a mode of payment given for outstanding topiary\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Hegemony refers to a dominant ideology that legitimates economic, political, and cultural power. 
The answer is (B).\n\nQ: The shift from 'civil religion' to 'common religion' means that:\n(A) the increasing bureaucracy of the state has made religion only a marginal part of our lives (B) despite the weakening of traditional authority, our everyday lives and 'common sense' remain shaped by religious beliefs and values (C) religious participation in collective worship may have declined, but people still practise their faiths in private (D) people are much more likely to discuss their religious beliefs in public, informal settings\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. The shift from civil religion to common religion means that despite the weakening of traditional authority, our everyday lives and common sense remain shaped by religious beliefs and values. The answer is (B).\n\nQ: Which of the following did the post-war welfare state of 1948 not aim to provide:\n(A) free health care and education for all (B) a minimum wage (C) full employment (D) universal welfare\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. The post-war welfare state of 1948 aimed to provide free healthcare and education, full employment, and universal welfare. But it did not aim to provide a minimum wage. The answer is (B).", "us_foreign_policy": "The following are multiple choice questions (with answers) about us foreign policy.\n\nQ: How did Donald Trump attack globalization in the 2016 campaign?\n(A) Globalization had made men like him too rich (B) Globalization only benefited certain American states, such as New York (C) Liberal elites had encouraged globalization, while 'ordinary Americans' lost jobs because of it (D) Globalization encouraged damaging trade wars\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. Trump attacked globalization because he believed ordinary Americans lost jobs due to it, and so he wanted to blame liberals who had encouraged it. The answer is (C).\n\nQ: How did NSC-68 change U.S. strategy?\n(A) It globalized containment. (B) It militarized containment. (C) It called for the development of the hydrogen bomb. (D) All of the above\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. NSC-68 outlined a variety of courses of action, including globalization of containment, militarization of containment, and the development of the hydrogen bomb. The answer is (D).\n\nQ: How do Defensive Realism and Offensive Realism differ in their explanation of state behaviour?\n(A) Defensive realists place greater emphasis on the role of international institutions (B) Defensive realists place less emphasis on geographical factors (C) Offensive realists give more priority to the national interest than Defensive realists. (D) Defensive realists believe states are security maximizers, while Offensive realists believe states to be power maximizers\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. While defensive realism advocates that states are security maximizers, offensive realists think of states as power maximizers. The answer is (D).\n\nQ: The realm of policy decisions concerned primarily with relations between the United States and the rest of the world is known as\n(A) terrorism policy. (B) economic policy. (C) foreign policy. (D) international policy.\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. 
The realm of policy decisions concerned primarily with relations between the US and the rest of the world is known as foreign policy. The answer is (C).\n\nQ: How did the 2008 financial crisis affect America's international reputation?\n(A) It damaged support for the US model of political economy and capitalism (B) It created anger at the United States for exaggerating the crisis (C) It increased support for American global leadership under President Obama (D) It reduced global use of the US dollar\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. The 2008 financial crisis damaged the international reputation of the American model of political economy and capitalism. The answer is (A).", "virology": "The following are multiple choice questions (with answers) about virology.\n\nQ: The median survival time to AIDS and death was established by following:\n(A) Seroprevalent HIV-infected individuals (B) Seronegatives (C) Seroconverters (D) High-risk seronegatives\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The median survival time to AIDS and death was established by following seroconverters. The answer is (C).\n\nQ: Which of the following is a morphological characteristic of the paramyxoviruses.\n(A) Fragile viruses often visualised with RNA spewing from the inside (B) Elongate viruses (C) Icosahedral viruses with envelope (D) Very large viruses\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. Paramyxoviruses are fragile viruses often visualised with RNA spewing from the inside. The answer is (A).\n\nQ: The most important goal of a behavioral intervention is:\n(A) Change in behavior (B) Comprehensive coverage (C) Effective use of behavioral theory (D) Sustained behavior change\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The primary goal of a behavioral intervention is to cause sustained behavior change. The answer is (D).\n\nQ: A key factor facilitating the application of nested case-control studies from the MACS was:\n(A) Data collection (B) Establishment of a repository of biologic specimens (C) Participant interest (D) Administration of the questionnaire by staff\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The Multicenter AIDS Cohort Study's use of nested case-control studies was facilitated by the establishment of a repository of biologic specimens. The answer is (B).\n\nQ: Why are parvoviruses a highly impactful parasite?\n(A) Because they have no nucleic acid (B) They require a helper virus (C) Only replicate in dividing cells (D) Can integrate into host chromosomes\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. Parvoviruses are highly impactful because they do not have nucleic acid. The answer is (A).", "world_religions": "The following are multiple choice questions (with answers) about world religions.\n\nQ: How can the Upanishads be characterized?\n(A) Ritual texts (B) Philosophical texts (C) Hymns (D) Origin stories\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The Upanishads are the most recent part of the Vedas (the oldest scriptures in Hinduism) and supplied the basis of later Hindu philosophy. So they are philosophical texts. The answer is (B).\n\nQ: What is the Second Gem in Buddhism?\n(A) The Dharma (B) The Sangha (C) The Buddha (D) The Bodhisattva\nA: Let's think step by step. 
We refer to Wikipedia articles on world religions for help. The Second Gem in Buddhism is The Dharma. The answer is (A).\n\nQ: Which Japanese government promoted a kind of national cult based on the emperor and his associations with kami?\n(A) Honen (B) Tanaka (C) Tokugawa (D) Meiji\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The promotion of a national cult based on the emperor and his associations with Kami happened during the reign of Emperor Meiji (1852-1912). The answer is (D).\n\nQ: In which dynasty was the \"Mandate of Heaven\" developed to legitimatize the new rulers?\n(A) Shang (B) Zhou (C) Han (D) Xia\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The \"Mandate of Heaven\" was developed as an ancient Chinese philosophical concept during the Zhou Dynasty (1046-256 BCE). The answer is (B).\n\nQ: What is the sign of the covenant for Jewish males?\n(A) The rainbow (B) Circumcision (C) A son (D) Bar mitzvah\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. In Judaism, the most distinctive sign of the covenant is circumcision (brit milah). The answer is (B)."} \ No newline at end of file +{"abstract_algebra": "The following are multiple choice questions (with answers) about abstract algebra.\n\nQ: Statement 1 | Every element of a group generates a cyclic subgroup of the group. Statement 2 | The symmetric group S_10 has 10 elements.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. A cyclic group is a group that is generated by a single element. Hence a subgroup generated by a single element of a group is cyclic and Statement 1 is True. The symmetric group $S_n$ has $n!$ elements, hence it is not true that $S_{10}$ has 10 elements and Statement 2 is False. The answer is (C).\n\nQ: Find the characteristic of the ring 2Z.\n(A) 0 (B) 3 (C) 12 (D) 30\nA: Let's think step by step. The characteristic of a ring R is $n$ if the statement $ka = 0$ for all $a \\in R$ implies that $k$ is a multiple of $n$. Assume that $ka = 0$ for all $a \\in 2Z$ for some $k$. In particular $2k = 0$. Hence $k=0$ and $n=0$. The answer is (A).\n\nQ: Statement 1| Every function from a finite set onto itself must be one to one. Statement 2 | Every subgroup of an abelian group is abelian.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. Statement 1 is true. Let $S$ be a finite set. If $f:S \\rightarrow S$ is an onto function, then $|S| = |f(S)|$. If $f$ was not one to one, then for finite domain $S$ the image would have fewer than $|S|$ elements, a contradiction.\nStatement 2 is true. Let $G$ be an abelian group and $H$ be a subgroup of $G$. We need to show that $H$ is abelian. Let $a,b \\in H$. Then $a,b \\in G$, and since $G$ is abelian, $ab=ba$. Since $H$ is a subgroup of $G$, $ab \\in H$. Therefore, $ab=ba$ and $H$ is abelian. The answer is (A).\n\nQ: Statement 1 | If aH is an element of a factor group, then |aH| divides |a|. Statement 2 | If H and K are subgroups of G then HK is a subgroup of G.\n(A) True, True (B) False, False (C) True, False (D) False, True\nA: Let's think step by step. Statement 2 is false. Let $H$ be a subgroup of $S_3$ generated by the cycle $(1,2)$ and $K$ be a subgroup of $S_3$ generated by the cycle $(1,3)$. Both $H$ and $K$ have two elements, the generators and the identity. 
However $HK$ contains cycles (1,2), (1,3) and (2,3,1), but the inverse of (2,3,1) is (2,1,3) and it does not belong to HK, hence HK is not a subgroup. The answer is (B).\n\nQ: Find all c in Z_3 such that Z_3[x]/(x^2 + c) is a field.\n(A) 0 (B) 1 (C) 2 (D) 3\nA: Let's think step by step. Z_3[x]/(x^2 + c) is a field if and only if x^2 + c does not have roots in Z_3. That is x^2 + c != 0 for every x in Z_3. If c = 0, then x^2 + c = x^2 has root 0. If c = 1 then x^2 + c = x^2 + 1 = 0 + 1 for x = 0, 1 + 1 = 2 for x = 1 and 1 + 1 = 2 for x = 2, hence x^2 + 1 does not have any roots. For c = 2 the polynomial x^2 + 2 has two roots at x = 1 and x = 2. Hence Z_3[x]/(x^2 + c) is a field if and only if c = 1. The answer is (B).", "anatomy": "The following are multiple choice questions (with answers) about anatomy.\n\nQ: Which of the following is the body cavity that contains the pituitary gland?\n(A) Abdominal (B) Cranial (C) Pleural (D) Spinal\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. The pituitary gland is the major endocrine gland attached to the base of the brain, and it is contained in the Cranial cavity. The answer is (B).\n\nQ: Which of these branches of the trigeminal nerve contain somatic motor processes?\n(A) The supraorbital nerve (B) The infraorbital nerve (C) The mental nerve (D) None of the above\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. \nWe know the following: (A) The supraorbital nerve (also known as the frontal nerve) is the largest branch of the ophthalmic nerve and branch of ophthalmic division of the trigeminal nerve. (B) The infraorbital nerve is a branch of the maxillary division of the trigeminal nerve. (C) The mental nerve is a branch of the mandibular division of the trigeminal nerve. All these nerves are purely sensory nerves and do not contain any somatic motor processes. Therefore, the answer should be none of the above, which is (D). The answer is (D).\n\nQ: In Angle's Class II Div 2 occlusion there is\n(A) excess overbite of the upper lateral incisors. (B) negative overjet of the upper central incisors. (C) excess overjet of the upper lateral incisors. (D) excess overjet of the upper central incisors.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. This is a question related to anatomy and orthodontics. Excess overjet is associated with Class II occlusions; therefore, we can safely eliminate (B) from the list, as negative overjet is often associated with Class III occlusions. Now, we need to determine the location of the excess overjet, and that would be the upper (maxillary) lateral incisors. Only (C) has the correct information. The answer is (C).\n\nQ: The pleura\n(A) have no sensory innervation. (B) are separated by a 2 mm space. (C) extend into the neck. (D) are composed of respiratory epithelium.\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. First, recall that the pleura refers to the thin layer of tissue that covers the lungs and lines the interior wall of the chest cavity. Now, let\u2019s look at each option:\nOption (A): \u201cThe pleura have no sensory innervation.\u201d This information is not correct. 
The pleura do have a sensory innervation.\nOption (B): \u201cThe pleura are separated by a 2 mm space.\u201d This information is not correct. There is a very thin \u201cpotential\u201d space between the layers of the pleura; however, it is typically filled with serous pleural fluid. \nOption (C): \u201cThe pleura extend into the neck.\u201d This information is actually true. The cervical pleura, also known as the dome of the pleura, lines the extension of the pleural cavity into the neck.\nOption (D): \u201cThe pleura are composed of respiratory epithelium.\u201d This information is not correct. The pleura are composed of connective tissue (CT).\nBecause (A), (B), and (D) are all incorrect, (C) is the only correct answer. The answer is (C).\n\nQ: What is the embryological origin of the hyoid bone?\n(A) The first pharyngeal arch (B) The first and second pharyngeal arches (C) The second pharyngeal arch (D) The second and third pharyngeal arches\nA: Let's think step by step. We refer to Wikipedia articles on anatomy for help. Let\u2019s solve this problem step by step. The hyoid bone is a small U-shaped bone located in the anterior neck. In its resting position, it lies between the base of the mandible and the third cervical vertebra. We know that the second and the third pharyngeal arches give rise to the horns of the hyoid bone; therefore, the embryological origin of the hyoid bone is the second and the third pharyngeal arches\u2014this information is covered in the last option (D). Therefore, we conclude that (D) must be the correct answer. The answer is (D).", "astronomy": "The following are multiple choice questions (with answers) about astronomy.\n\nQ: Where do most short-period comets come from and how do we know?\n(A) The Kuiper belt; short period comets tend to be in the plane of the solar system just like the Kuiper belt. (B) The Kuiper belt; short period comets tend to come from random directions indicating a spherical distribution of comets called the Kuiper belt. (C) The asteroid belt; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the asteroid belt. (D) The Oort cloud; short period comets tend to be in the plane of the solar system just like the Oort cloud.\nA: Let's think step by step. Most short-period comets come from the Kuiper belt, and we know because short period comets tend to be in the plane of the solar system, just like the Kuiper belt is. The answer is (A).\n\nQ: You are pushing a truck along a road. Would it be easier to accelerate this truck on Mars? Why? (Assume there is no friction)\n(A) It would be harder since the truck is heavier on Mars. (B) It would be easier since the truck is lighter on Mars. (C) It would be harder since the truck is lighter on Mars. (D) It would be the same no matter where you are.\nA: Let's think step by step. If we assume that there is no friction, the force needed to accelerate the truck is by Newton\u2019s second law only dependent on the mass of the truck. Hence (A), (B) and (C) are incorrect since it doesn\u2019t matter that it\u2019s on Mars, and (D) is the correct answer. The answer is (D).\n\nQ: Say the pupil of your eye has a diameter of 5 mm and you have a telescope with an aperture of 50 cm. How much more light can the telescope gather than your eye?\n(A) 10000 times more (B) 100 times more (C) 1000 times more (D) 10 times more\nA: Let's think step by step. 
The amount of light is proportional to the aperture area $A = \\pi D^2/4$ for a lens with diameter $D$, so the relative amounts of light between the eye with diameter 5 mm and the telescope with diameter 50 cm is $(50 cm)^2/(5mm)^2 = 10000$. The answer is (A).\n\nQ: Why isn't there a planet where the asteroid belt is located?\n(A) A planet once formed here but it was broken apart by a catastrophic collision. (B) There was not enough material in this part of the solar nebula to form a planet. (C) There was too much rocky material to form a terrestrial planet but not enough gaseous material to form a jovian planet. (D) Resonance with Jupiter prevented material from collecting together to form a planet.\nA: Let's think step by step. The asteroid belt is a circumstellar disc consisting of a large number of asteroids between Mars and Jupiter's orbits. The asteroids in this belt are affected by the gravitational pull from both other asteroids and nearby planets. Due to the strong gravitational force of Jupiter there are resonances that give rise to low density regions of asteroids known as the Kirkwood gaps. So (B) and (C) are not correct since it\u2019s not a lack of material that prevents a planet from being formed, and (A) is incorrect because the Kirkwood gaps would have prevented a planet from forming in the first place, and (D) is the correct option. The answer is (D).\n\nQ: Why is Mars red?\n(A) Because the surface is covered with heavily oxidized (\"rusted\") minerals. (B) Because the atmosphere scatters more light at bluer wavelengths transmitting mostly red light. (C) Because Mars is covered with ancient lava flows which are red in color. (D) Because flowing water on Mars's surface altered the surface minerals several billion years ago.\nA: Let's think step by step. Option (B) is not correct because if the red color was caused by the scattering off the atmosphere, then the earth with a much thicker atmosphere would also look red. Options (C) and (D) are not specific enough about why the color of the surface would be red, while (A) is correct because it explains that the surface is red due to the rusted materials on the surface and the red color comes from the rust. So the correct option is (A). The answer is (A).", "business_ethics": "The following are multiple choice questions (with answers) about business ethics.\n\nQ: In contrast to _______, _______ aim to reward favourable behaviour by companies. The success of such campaigns have been heightened through the use of ___________, which allow campaigns to facilitate the company in achieving _________ .\n(A) Buycotts, Boycotts, Blockchain technology, Charitable donations (B) Buycotts, Boycotts, Digital technology, Increased Sales (C) Boycotts, Buyalls, Blockchain technology, Charitable donations (D) Boycotts, Buycotts, Digital technology, Increased Sales\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cIn contrast to *boycotts*, *buycotts* aim to reward favourable behavior by companies. 
The success of such campaigns have been heightened through the use of *digital technology*, which allow campaigns to facilitate the company in achieving *increased sales*.\u201d The answer is (D).\n\nQ: _______ is the direct attempt to formally or informally manage ethical issues or problems, through specific policies, practices and programmes.\n(A) Corporate social responsibility (B) Business ethics management (C) Sustainability (D) Environmental management\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The direct attempt to manage ethical issues through specific policies, practices, and programs is business ethics management. The answer is (B).\n\nQ: Three contrasting tactics that CSO's can engage in to meet their aims are ________ which typically involves research and communication, ________, which may involve physically attacking a company's operations or ________, often involving some form of _______.\n(A) Non-violent direct action, Violent direct action, Indirect action, Boycott (B) Indirect action, Instrumental action, Non-violent direct action, Information campaign (C) Indirect action, Violent direct action, Non-violent direct-action Boycott (D) Non-violent direct action, Instrumental action, Indirect action, Information campaign\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cThree contrasting tactics that CSO's can engage in to meet their aims are *indirect action*, which typically involves research and communication, *violent direct action*, which may involve physically attacking a company's operations or *non-violent direct action*, often involving some form of *boycott*.\u201d The answer is (C).\n\nQ: To ensure the independence of the non-executive board members, there are a number of steps which can be taken, which include non-executives being drawn from _______ the company, being appointed for a _________ time period as well as being appointed _________.\n(A) Outside, Limited, Independently (B) Inside, Limited, Intermittently (C) Outside, Unlimited, Intermittently (D) Inside, Unlimited, Independently\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cTo ensure the independence of the non-executive board members, there are a number of steps which can be taken, which include non-executives being drawn from *outside* the company, being appointed for a *limited* time period as well as being appointed *independently*.\u201d The answer is (A).\n\nQ: Beyond the business case for engaging in CSR there are a number of moral arguments relating to: negative _______, the _______that corporations possess and the ________ of business and society.\n(A) Externalities, Power, Independence (B) Publicity, Insubstantial resources, Mutual dependence (C) Publicity, Power, Independence (D) Externalities, Power, Mutual dependence\nA: Let's think step by step. We refer to Wikipedia articles on business ethics for help. The sentence that best uses the possible options above is \u201cBeyond the business case for engaging in CSR there are a number of moral arguments relating to: negative *externalities*, the *power* that corporations possess and the *mutual dependence* of business and society.\u201d 
The answer is (D).", "clinical_knowledge": "The following are multiple choice questions (with answers) about clinical knowledge.\n\nQ: Glycolysis is the name given to the pathway involving the conversion of:\n(A) glycogen to glucose-1-phosphate. (B) glycogen or glucose to fructose. (C) glycogen or glucose to pyruvate or lactate. (D) glycogen or glucose to pyruvate or acetyl CoA.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. Glycolysis is the name given to the pathway involving conversion of glycogen or glucose to pyruvate or lactate. The answer is (C).\n\nQ: What is the difference between a male and a female catheter?\n(A) Male and female catheters are different colours. (B) Male catheters are longer than female catheters. (C) Male catheters are bigger than female catheters. (D) Female catheters are longer than male catheters.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. The difference between a male and female catheter is that male catheters tend to be longer than female catheters. The answer is (B).\n\nQ: How many attempts should you make to cannulate a patient before passing the job on to a senior colleague, according to the medical knowledge of 2020?\n(A) 4 (B) 3 (C) 2 (D) 1\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. According to the medical protocol as of 2020, you should make two attempts to cannulate a patient before passing the job on to a more-senior practitioner. The answer is (C).\n\nQ: In the assessment of the hand function which of the following is true?\n(A) Abduction of the thumb is supplied by spinal root T2 (B) Opposition of the thumb by opponens policis is supplied by spinal root T1 (C) Finger adduction is supplied by the median nerve (D) Finger abduction is mediated by the palmar interossei\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. Of all the options, it is only true that the opposition of the thumb by opponens pollicis is supplied by spinal root T1. The answer is (B).\n\nQ: The energy for all forms of muscle contraction is provided by:\n(A) ATP. (B) ADP. (C) phosphocreatine. (D) oxidative phosphorylation.\nA: Let's think step by step. We refer to Wikipedia articles on clinical knowledge for help. The energy for muscular contraction is provided by ATP (adenosine triphosphate), which is the powerhouse of the cell. The answer is (A).", "college_biology": "The following are multiple choice questions (with answers) about college biology.\n\nQ: Which of the following represents an accurate statement concerning arthropods?\n(A) They possess an exoskeleton composed primarily of peptidoglycan. (B) They possess an open circulatory system with a dorsal heart. (C) They are members of a biologically unsuccessful phylum incapable of exploiting diverse habitats and nutrition sources. (D) They lack paired, jointed appendages.\nA: Let's think step by step. Peptidoglycan is known to comprise the plasma membrane of most bacteria, rather than the exoskeleton of arthropods, which is made of chitin, which rules out (A). The answer (C) is false because arthropods are a highly successful phylum. Likewise, arthropods have paired, jointed appendages, which rules out (D). The only remaining option is (B), as arthropods have an open circulatory system with a dorsal tubular heart. 
The answer is (B).\n\nQ: In a given population, 1 out of every 400 people has a cancer caused by a completely recessive allele, b. Assuming the population is in Hardy-Weinberg equilibrium, which of the following is the expected proportion of individuals who carry the b allele but are not expected to develop the cancer?\n(A) 1/400 (B) 19/400 (C) 20/400 (D) 38/400\nA: Let's think step by step. According to the Hardy Weinberg Law, $p^2 + 2 p q + q^2 = 1$, and $p + q = 1$ where $p$ is the frequency of the dominant allele, $q$ is the frequency of the recessive allele, and $p^2$, $q^2$, and $2pq$ are the frequencies of dominant homozygous, recessive homozygous, and heterozygous individuals, respectively. The frequency of the recessive allele (q) is $\\sqrt{\\frac{1}{400}} = 0.05$. We have $p = 1 - q = 0.95$. The frequency of heterozygous individuals is $2pq = 2 \\cdot 0.05 \\cdot 0.95 = 0.095$. The number of heterozygous individuals is equal to the frequency of heterozygous individuals times the size of the population, or $0.095 * 400 = 38$. So we end up with 38/400. The answer is (D).\n\nQ: According to the pressure-flow model of movement of phloem contents, photosynthate movement from source to sink is driven by\n(A) an ATP-dependent pressure-flow pump (B) a water-pressure potential gradient (C) transpiration (D) apoplastic diffusion\nA: Let's think step by step. It is a gradient in water pressure that induces the movement of phloem content, which refers to answer (B). The mechanism of movement does not rely on metabolism, which rules out (A). Transpiration refers to the exhalation of water vapor through plant stomata, and is also not related, which rules out (C). While the apoplastic pathway is one of two main pathways for water transport in plants, it is not central to the pressure flow model, which rules out (D). The answer is (B).\n\nQ: Which of the following contain DNA sequences required for the segregation of chromosomes in mitosis and meiosis?\n(A) Telomeres (B) Centromeres (C) Nucleosomes (D) Spliceosomes\nA: Let's think step by step. The genetic material in telomeres is not used, which rules out (A). Nucleosomes are the repeating subunit that comprises chromatin packed in a cell nucleus, and do not specifically refer to DNA sequences necessary for segregating chromosomes in cell division, which rules out (C). A spliceosome is a large ribonucleoprotein that removes introns from transcribed pre-mRNA rather than governing chromosome segregation. Centromeres are directly responsible for segregating chromosomes in cell division. The answer is (B).\n\nQ: The presence of homologous structures in two different organisms, such as the humerus in the front limb of a human and a bird, indicates that\n(A) the human and bird are polyphyletic species (B) a human's and bird's evolution is convergent (C) the human and bird belong to a clade (D) the human and bird developed by analogy\nA: Let's think step by step. Polyphyletic species are organisms that are grouped due to having similar characteristics but which do not have a common ancestor. This is not the case for humans and birds, which rules out (A). Convergent evolution refers to the independent development of similar features in different species at different periods, which is also not the case for humans and birds, which rules out (B). Analogy refers to the superficial resemblance of structures that have different origins, which is not the case for the human and bird forearms, which rules out (D). 
Humans and birds do belong to the same clade - a group of organisms descended from a common ancestor. The answer is (C).", "college_chemistry": "The following are multiple choice questions (with answers) about college chemistry.\n\nQ: 3 Cl\u2212(aq) + 4 CrO_4^2\u2212(aq) + 23 H+(aq) \u2192 3 HClO2(aq) + 4 Cr3+(aq) + 10 H2O(l). In the reaction shown above, Cl\u2212(aq) behaves as\n(A) an acid (B) a base (C) a catalyst (D) a reducing agent\nA: Let's think step by step. A molecule that behaves as a base accepts an H+ ion (or proton) from another molecule, whereas a molecule that behaves as an acid donates an H+ ion (or proton) to another molecule. Neither of these is the case for Cl in this reaction, which rules out (A) and (B). A catalyst is a substance that only accelerates a reaction without itself undergoing chemical change, which is not the case here. This rules out (C). Instead, the $Cl^{-}$ ions carry a negative charge, which they donate in the reaction to form 3 HClO2. This is the behavior of a reducing agent, or (D). The answer is (D).\n\nQ: Which of the following statements about the lanthanide elements is NOT true?\n(A) The most common oxidation state for the lanthanide elements is +3. (B) Lanthanide complexes often have high coordination numbers (> 6). (C) All of the lanthanide elements react with aqueous acid to liberate hydrogen. (D) The atomic radii of the lanthanide elements increase across the period from La to Lu.\nA: Let's think step by step. The atomic radii of the lanthanide elements in fact decrease across the period from La to Lu. Options (A), (B), and (C) are all true. This means that only (D) is NOT true. The answer is (D).\n\nQ: Which of the following lists the hydrides of group-14 elements in order of thermal stability, from lowest to highest?\n(A) PbH4 < SnH4 < GeH4 < SiH4 < CH4 (B) PbH4 < SnH4 < CH4 < GeH4 < SiH4 (C) CH4 < SiH4 < GeH4 < SnH4 < PbH4 (D) CH4 < PbH4 < GeH4 < SnH4 < SiH4\nA: Let's think step by step. The thermal stability of group-14 hydrides decreases as we move from the top of group 14 to the bottom. The order of elements in the group from top to bottom is C, Si, Ge, Sn, Pb. Therefore in order of increasing thermal stability we have PbH4, SnH4, GeH4, SiH4, and CH4, or answer (A). The answer is (A).\n\nQ: Predict the number of lines in the EPR spectrum of a solution of 13C-labelled methyl radical (13CH3\u2022), assuming the lines do not overlap.\n(A) 4 (B) 3 (C) 6 (D) 24 (E) 8\nA: Let's think step by step. The electron paramagnetic resonance spectrum will be split by two forms of interactions. The first is the hyperfine interaction with the 13C (nuclear spin $I = \\frac{1}{2}$) which will split the spectrum into 2 lines. This will be further split into 4 lines by the interaction with three equivalent 1H nuclei. The total number of lines is therefore $2 \\cdot 4 = 8$. The answer is (E).", "college_computer_science": "The following are multiple choice questions (with answers) about college computer science.\n\nQ: Which of the following regular expressions is equivalent to (describes the same set of strings as) (a* + b)*(c + d)?\n(A) a*(c + d)+ b(c + d)\n(B) a*(c + d)* + b(c + d)*\n(C) a*(c + d)+ b*(c + d)\n(D) (a + b)*c +(a + b)*d\nA: Let's think step by step. We know that:\n1. (X* + Y)* = (X + Y)*\n2. X(Y + Z) = XY + XZ\nUsing equation 1 we can rewrite (a* + b)*(c + d) as:\n3. 
(a + b)*(c + d)\nUsing equation 2 we can rewrite equation 3 as:\n(a + b)*c + (a + b)*d The answer is (D).\n\nQ: The Singleton design pattern is used to guarantee that only a single instance of a class may be instantiated. Which of the following is (are) true of this design pattern?\nI. The Singleton class has a static factory method to provide its instance.\nII. The Singleton class can be a subclass of another class.\nIII. The Singleton class has a private constructor.\n(A) I only\n(B) II only\n(C) III only\n(D) I, II, and III\nA: Let's think step by step. Statement I is a correct statement about a Singleton, because a Singleton provides its single instance through a static factory method. Statement II is also correct, because there is no inherent restriction regarding the inheritance of a Singleton. Statement III is also correct, because a Singleton must be instantiated only once, so its constructor is made private to prevent any construction except via its static factory method.\nGiven these facts, statements I, II, and III are all correct. The answer is (D).\n\nQ: A certain pipelined RISC machine has 8 general-purpose registers R0, R1, . . . , R7 and supports the following operations:\nADD Rs1, Rs2, Rd (Add Rs1 to Rs2 and put the sum in Rd)\nMUL Rs1, Rs2, Rd (Multiply Rs1 by Rs2 and put the product in Rd)\nAn operation normally takes one cycle; however, an operation takes two cycles if it produces a result required by the immediately following operation in an operation sequence.\nConsider the expression AB + ABC + BC, where variables A, B, C are located in registers R0, R1, R2. If the contents of these three registers must not be modified, what is the minimum number of clock cycles required for an operation sequence that computes the value of AB + ABC + BC?\n(A) 5 (B) 6 (C) 7 (D) 8\nA: Let's think step by step. First, we are given that A is in R0, B is in R1, and C is in R2.\nNext, we can see that we must compute three multiplies (AB, BC, and ABC) and two adds (AB + ABC, (AB + ABC) + BC) to compute our final answer, resulting in a minimum of five clock cycles.\nNext, we can see that there is no way to avoid at least one pipeline stall when computing our final answer, because to compute our final sum we must wait at least one cycle for the results from the previous stage to be ready. Thus, our minimum number of cycles must be 6.\nWe can verify that we can create a solution that requires only six cycles as follows:\ncompute AB: MUL R0, R1, R3\ncompute BC: MUL R1, R2, R4\ncompute ABC: MUL R3, R4, R5\ncompute AB + BC: ADD R3, R4, R6\nSTALL\ncompute AB + ABC + BC: ADD R5, R6, R7\nSo there are 6 cycles. The answer is (B).\n\nQ: A compiler generates code for the following assignment statement.\nG := (A + B) * C - (D + E) * F\nThe target machine has a single accumulator and a single-address instruction set consisting of instructions load, store, add, subtract, and multiply. For the arithmetic operations, the left operand is taken from the accumulator and the result appears in the accumulator. The smallest possible number of instructions in the resulting code is\n(A) 5 (B) 6 (C) 7 (D) 9\nA: Let's think step by step. We can compute the final answer with the following sequence of operations:\n1. LOAD D (accumulator = D)\n2. ADD E (accumulator = D+E)\n3. MUL F (accumulator = (D+E)*F)\n4. STORE X (X = (D+E)*F)\n5. LOAD A (accumulator = A)\n6. ADD B (accumulator = A+B)\n7. MUL C (accumulator = (A+B)*C)\n8. SUB X (accumulator = (A+B)*C - (D+E)*F)\n9. 
STORE G (G = (A+B)*C - (D+E)*F)\nThis sequence takes 9 instructions. The answer is (D).\n\nQ: Consider a computer design in which multiple processors, each with a private cache memory, share global memory using a single bus. This bus is the critical system resource. Each processor can execute one instruction every 500 nanoseconds as long as memory references are satisfied by its local cache. When a cache miss occurs, the processor is delayed for an additional 2,000 nanoseconds. During half of this additional delay, the bus is dedicated to serving the cache miss. During the other half, the processor cannot continue, but the bus is free to service requests from other processors. On average, each instruction requires 2 memory references. On average, cache misses occur on 1 percent of references. What proportion of the capacity of the bus would a single processor consume, ignoring delays due to competition from other processors?\n(A) 1/50 (B) 1/27 (C) 1/25 (D) 2/27\nA: Let's think step by step. We know that each instruction requires two memory references per instruction, and that there is an average cache miss rate of one percent.\nThus a given processor has:\n(1 cache miss / 100 references) * (2 references / instruction) =\n(2 cache misses / 100 instructions), so:\nmisses_per_instruction = 1 cache miss / 50 instructions.\nNext, we know that each instruction requires 500 nanoseconds when there is no cache miss, and 500 + 2000 = 2500 nanoseconds when there is a cache miss. Thus:\n50 instructions / ((49 * 500) + (1 * 2500)) nanoseconds, so:\ninstructions_per_ns = 50 instructions / 27000 nanoseconds.\nNow, we know that each cache miss locks the bus for half of the 2000 nanosecond cache miss delay, or 1000 nanoseconds, so:\nlock_ns_per_miss = 1000 nanoseconds / cache miss.\nThus we can see that on average a single processor will lock the bus for:\nlock_ns_per_miss * misses_per_instruction * instructions_per_ns =\n(1000 nanoseconds / cache miss) * (1 cache miss / 50 instructions) * (50 instructions / 27000 nanoseconds) = 1000 * (1/50) * (50/27000) = 1000/27000 = 1/27. The answer is (B).", "college_mathematics": "The following are multiple choice questions (with answers) about college mathematics.\n\nQ: Let V be the set of all real polynomials p(x). Let transformations T, S be defined on V by T:p(x) -> xp(x) and S:p(x) -> p'(x) = d/dx p(x), and interpret (ST)(p(x)) as S(T(p(x))). Which of the following is true?\n(A) ST = 0 (B) ST = T (C) ST = TS (D) ST - TS is the identity map of V onto itself.\nA: Let's think step by step. For a given polynomial $p$ we have\n\\[ST(p) = (xp(x))\u2019 = p(x) + xp\u2019(x)\\]\nand\n\\[TS(p) = xp\u2019(x).\\]\nHence \\[ST(p) - TS(p) = p(x) + xp\u2019(x) - xp\u2019(x) = p(x),\\]\nso $ST - TS$ is the identity map. The answer is (D).\n\nQ: Suppose that f(1 + x) = f(x) for all real x. If f is a polynomial and f(5) = 11, then f(15/2)\n(A) -11 (B) 0 (C) 11 (D) 33/2\nA: Let's think step by step. The only polynomial such that $f(1 + x) = f(x)$ is a constant polynomial. Hence $f(5) = 11 = f(15/2)$. The answer is (C).\n\nQ: Let A be a real 2x2 matrix. Which of the following statements must be true?\nI. All of the entries of A^2 are nonnegative.\nII. The determinant of A^2 is nonnegative.\nIII. If A has two distinct eigenvalues, then A^2 has two distinct eigenvalues.\n(A) I only (B) II only (C) III only (D) II and III only\nA: Let's think step by step. We have \\[ det(A^2) = (det(A))^2 \\geq 0,\\] hence II holds.\nI is false: take, for example, the rotation matrix $A$ with rows $(0, -1)$ and $(1, 0)$; then $A^2 = -I$ has negative entries.\nIII is false: as a counterexample take a diagonal matrix with -1 and 1 on the diagonal. 
", "college_mathematics": "The following are multiple choice questions (with answers) about college mathematics.\n\nQ: Let V be the set of all real polynomials p(x). Let transformations T, S be defined on V by T:p(x) -> xp(x) and S:p(x) -> p'(x) = d/dx p(x), and interpret (ST)(p(x)) as S(T(p(x))). Which of the following is true?\n(A) ST = 0 (B) ST = T (C) ST = TS (D) ST - TS is the identity map of V onto itself.\nA: Let's think step by step. For a given polynomial $p$ we have\n\\[ST(p) = (xp(x))\u2019 = p(x) + xp\u2019(x)\\]\nand\n\\[TS(p) = xp\u2019(x).\\]\nHence \\[ST(p) - TS(p) = p(x) + xp\u2019(x) - xp\u2019(x) = p(x).\\] The answer is (D).\n\nQ: Suppose that f(1 + x) = f(x) for all real x. If f is a polynomial and f(5) = 11, then f(15/2)\n(A) -11 (B) 0 (C) 11 (D) 33/2\nA: Let's think step by step. The only polynomial so that $f(1 + x) = f(x)$ is a constant polynomial. Hence $f(5) = 11 = f(15/2)$. The answer is (C).\n\nQ: Let A be a real 2x2 matrix. Which of the following statements must be true?\nI. All of the entries of A^2 are nonnegative.\nII. The determinant of A^2 is nonnegative.\nIII. If A has two distinct eigenvalues, then A^2 has two distinct eigenvalues.\n(A) I only (B) II only (C) III only (D) II and III only\nA: Let's think step by step. We have \\[ det(A^2) = (det(A))^2 \\geq 0,\\] hence II holds.\nIII is false: as a counterexample take a diagonal matrix with -1 and 1 on the diagonal. Then $A^2$ is the identity matrix. The answer is (B).\n\nQ: Let A be the set of all ordered pairs of integers (m, n) such that 7m + 12n = 22. What is the greatest negative number in the set B = {m + n : (m, n) \\in A}?\n(A) -5 (B) -4 (C) -3 (D) -2\nA: Let's think step by step. We have 12n = 22 - 7m and one of the solutions is $m = -2$, $n = 3$. Then $m + n = 1$, hence we need to look for smaller $m$ in order to make $m + n$ negative. The next solution is $m = -14$ and $n = 10$. For smaller $m$ we have $m + n$ smaller than $-4$. The answer is (B).\n\nQ: A tank initially contains a salt solution of 3 grams of salt dissolved in 100 liters of water. A salt solution containing 0.02 grams of salt per liter of water is sprayed into the tank at a rate of 4 liters per minute. The sprayed solution is continually mixed with the salt solution in the tank, and the mixture flows out of the tank at a rate of 4 liters per minute. If the mixing is instantaneous, how many grams of salt are in the tank after 100 minutes have elapsed?\n(A) 2 (B) 2 - e^-2 (C) 2 + e^-2 (D) 2 + e^-4\nA: Let's think step by step. For all $t \\in \\mathbb{R}$, let $s(t)$ denote the number of grams of salt in the tank at the $t$ minute mark. Then $s(0) = 3$.\nWe use $s$ and $s(t)$ interchangeably. We also use $s^{\\prime}$ and $s^{\\prime}(t)$ interchangeably. The solution sprayed into the tank adds $(0.02) 4=2 / 25$ grams of salt per minute. There are always 100 liters of liquid in the tank, containing $s$ grams of salt. So the density of salt in the tank is $s / 100$ grams per liter. The flow of water out of the tank therefore subtracts $4(s / 100)=s / 25$ grams of salt per minute. Then, for all $t \\in \\mathbb{R}$, we have $s^{\\prime}(t)=(2 / 25)-(s / 25)=(2-s) / 25$, and so $[s(t)=2] \\Rightarrow\\left[s^{\\prime}(t)=0\\right]$. For all $t \\in \\mathbb{R}$,\n$$\n\\frac{d}{d t}[\\ln (s-2)]=\\frac{s^{\\prime}}{s-2}=\\frac{-1}{25}=\\frac{d}{d t}\\left[-\\frac{t}{25}\\right] .\n$$\nChoose $C \\in \\mathbb{R}$ such that, for all $t \\in \\mathbb{R}, \\ln ((s(t)-2))=-[t / 25]+C$. Let $K:=e^{C}$. Then, for all $t \\in \\mathbb{R}$, we have $(s(t))-2=K e^{-t / 25}$, and so $s(t)=2+K e^{-t / 25}$. Then $3=s(0)=2+K e^{0}=2+K$, so $K=1$. Then $s(100)=2+K e^{-100 / 25}=2+1 \\cdot e^{-4}=2+e^{-4}$. The answer is (D).
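The closed-form answer above can be sanity-checked numerically; the sketch below (all names are ours) integrates s'(t) = (2 - s)/25 with a simple Euler step and compares the result against 2 + e^-4:

    import math

    steps = 100_000
    dt = 100 / steps                 # integrate from t = 0 to t = 100
    s = 3.0                          # s(0) = 3 grams
    for _ in range(steps):
        s += (2 - s) / 25 * dt       # s'(t) = (2 - s) / 25
    print(s, 2 + math.exp(-4))       # both print approximately 2.0183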
", "college_medicine": "The following are multiple choice questions (with answers) about college medicine.\n\nQ: An expected side effect of creatine supplementation is:\n(A) muscle weakness. (B) gain in body mass. (C) muscle cramps. (D) loss of electrolytes.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Creatine is a dietary supplement whose use results in a gain in body mass. The answer is (B).\n\nQ: Which of the following is not a true statement?\n(A) Muscle glycogen is broken down enzymatically to glucose-1-phosphate (B) Elite endurance runners have a high proportion of Type I fibres in their leg muscles (C) Liver glycogen is important in the maintenance of the blood glucose concentration (D) Insulin promotes glucose uptake by all tissues in the body\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Let\u2019s solve this step by step and go over each choice: \n(A) \u201cMuscle glycogen is broken down enzymatically to glucose-1-phosphate\u201d: This is a correct statement.\n(B) \u201cElite endurance runners have a high proportion of Type I fibres in their leg muscles\u201d: This is a correct statement.\n(C) \u201cLiver glycogen is important in the maintenance of the blood glucose concentration\u201d: This is a correct statement. \n(D) \u201cInsulin promotes glucose uptake by all tissues in the body\u201d: This is not a correct statement, because insulin promotes glucose uptake by the liver, adipose tissue, and muscle, but not all tissues. For instance, the tissues in the brain and red blood cells are not affected by insulin. The answer is (D).\n\nQ: A high school science teacher fills a 1 liter bottle with pure nitrogen and seals the lid. The pressure is 1.70 atm, and the room temperature is 25\u00b0C. Which two variables will both increase the pressure of the system, if all other variables are held constant?\n(A) Increasing temperature, increasing moles of gas (B) Increasing temperature, increasing volume (C) Decreasing volume, decreasing temperature (D) Decreasing moles of gas, increasing volume\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The relevant equation for this is the ideal gas law: PV=nRT. To increase the pressure of the system (P), either n (the number of moles of the gas) or T (the temperature) has to increase. The answer is (A).\n\nQ: In a genetic test of a newborn, a rare genetic disorder is found that has X-linked recessive transmission. Which of the following statements is likely true regarding the pedigree of this disorder?\n(A) All descendants on the maternal side will have the disorder. (B) Females will be approximately twice as affected as males in this family. (C) All daughters of an affected male will be affected. (D) There will be equal distribution of males and females affected.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Let\u2019s solve this step by step. Let's recall first that females have two X chromosomes, while males have one X and one Y chromosome. This is an important fact we need to know before answering this question. \nBecause a male passes his only X chromosome to every one of his daughters, if he is affected by this rare genetic disorder, then we know for sure that he will pass this rare genetic disorder to all his future-born daughters. Therefore, \u201c(C): All daughters of an affected male will be affected\u201d is a correct statement. The answer is (C).\n\nQ: Glucose is transported into the muscle cell:\n(A) via protein transporters called GLUT4. (B) only in the presence of insulin. (C) via hexokinase. (D) via monocarbylic acid transporters.\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Glucose (also known as the blood sugar) is the main sugar found in the human body. It is transported into the muscle cell via facilitated diffusion through protein transporters called GLUT4. The answer is (A).", "college_physics": "The following are multiple choice questions (with answers) about college physics.\n\nQ: A refracting telescope consists of two converging lenses separated by 100 cm. The eye-piece lens has a focal length of 20 cm. The angular magnification of the telescope is\n(A) 4 (B) 5 (C) 6 (D) 20\nA: Let's think step by step. 
In a refracting telescope, if both lenses are converging, the focus of both lenses must be between the two lenses, and thus the focal lengths of the two lenses must add up to their separation. Since the focal length of one lens is 20 cm, the focal length of the other must be 80 cm. The magnification is the ratio of these two focal lengths, or 4. The answer is (A).\n\nQ: The muon decays with a characteristic lifetime of about 10^-6 second into an electron, a muon neutrino, and an electron antineutrino. The muon is forbidden from decaying into an electron and just a single neutrino by the law of conservation of\n(A) charge (B) mass (C) energy and momentum (D) lepton number\nA: Let's think step by step. Lepton number must be conserved, meaning the total number of leptons minus the number of antileptons. If a muon decays into an electron and a single neutrino, the total lepton number would go from one to two, violating lepton number conservation. The answer is (D).\n\nQ: One end of a Nichrome wire of length 2L and cross-sectional area A is attached to an end of another Nichrome wire of length L and cross-sectional area 2A. If the free end of the longer wire is at an electric potential of 8.0 volts, and the free end of the shorter wire is at an electric potential of 1.0 volt, the potential at the junction of the two wires is most nearly equal to\n(A) 2.4 V (B) 3.3 V (C) 4.5 V (D) 5.7 V\nA: Let's think step by step. This is a simple voltage divider problem, where the longer wire has a resistance four times that of the shorter wire. So the voltage divider ratio is 1 / 5, meaning that the potential in the middle is 1.0 V + (8.0 V - 1.0 V) * 1/5 = 2.4 V. The answer is (A).\n\nQ: For which of the following thermodynamic processes is the increase in the internal energy of an ideal gas equal to the heat added to the gas?\n(A) Constant temperature (B) Constant volume (C) Constant pressure (D) Adiabatic\nA: Let's think step by step. Heat added to the gas can go into the gas's internal energy or work done against an external force. However, if the volume of the gas container is constant, no work will be done (since work is pressure times change in volume). So, at constant volume, all of the heat goes into the internal energy. The answer is (B).", "computer_security": "The following are multiple choice questions (with answers) about computer security.\n\nQ: SHA-1 has a message digest of\n(A) 160 bits (B) 512 bits (C) 628 bits (D) 820 bits\nA: Let's think step by step. Since SHA-1 is a hash function which takes an input and produces a 160-bit (20-byte) hash value, its message digest is 160 bits. The answer is (A).
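The digest size can be confirmed with Python's standard hashlib module (the input bytes below are arbitrary):

    import hashlib

    digest = hashlib.sha1(b"any input").digest()  # SHA-1 of an arbitrary message
    print(len(digest) * 8)                        # 160 bits (20 bytes)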
\n\nQ: _____________ can modify data on your system \u2013 so that your system doesn\u2019t run correctly or you can no longer access specific data, or it may even ask for ransom in order to give your access.\n(A) IM \u2013 Trojans (B) Backdoor Trojans (C) Trojan-Downloader (D) Ransom Trojan\nA: Let's think step by step. The question describes malware that modifies data, blocks access to it, and may demand a ransom to restore access; this is a Ransom Trojan. The answer is (D).\n\nQ: What is ethical hacking?\n(A) \"Hacking\" ethics so they justify unintended selfish behavior (B) Hacking systems (e.g., during penetration testing) to expose vulnerabilities so they can be fixed, rather than exploited (C) Hacking into systems run by those whose ethics you disagree with (D) A slang term for rapid software development, e.g., as part of hackathons\nA: Let's think step by step. Ethical hacking is a process of detecting vulnerabilities in an application, system, or organization's infrastructure that an attacker can use to exploit an individual or organization. Ethical hackers use this process to prevent cyberattacks and security breaches by lawfully hacking into the systems and looking for weak points. The answer is (B).\n\nQ: The ____________ is anything which your search engine cannot search.\n(A) Haunted web (B) World Wide Web (C) Surface web (D) Deep Web\nA: Let's think step by step. The search engine searches on the Surface Web, which is the portion of the World Wide Web which is visible, so (B) and (C) are wrong. The Haunted Web doesn\u2019t correspond to an internet concept. The Deep Web is the part of the World Wide Web which is not indexed. The answer is (D).\n\nQ: Exploitation of the Heartbleed bug permits\n(A) overwriting cryptographic keys in memory (B) a kind of code injection (C) a read outside bounds of a buffer (D) a format string attack\nA: Let's think step by step. The Heartbleed Bug is a serious vulnerability in the popular OpenSSL cryptographic software library. Heartbleed resulted from improper input validation (due to a missing bounds check) in the implementation of the TLS heartbeat extension. The vulnerability was classified as a buffer over-read, a situation where more data can be read than should be allowed. The answer is (C).", "conceptual_physics": "The following are multiple choice questions (with answers) about conceptual physics.\n\nQ: Colors in a soap bubble result from light\n(A) converted to a different frequency (B) deflection (C) interference (D) polarization\nA: Let's think step by step. In a soap bubble film, the light bounces between the two soap-air interfaces many times, interfering with itself constructively or destructively depending on the thickness of the film. This results in different colors being visible. The answer is (C).\n\nQ: Compared with the mass of a uranium atom undergoing fission, the combined masses of the products after fission are\n(A) less (B) more (C) the same (D) zero\nA: Let's think step by step. Fission releases energy, which comes from the rest mass of its initial nucleus. Thus the mass of the products is less than the mass of the reactant uranium nucleus. The answer is (A).\n\nQ: Things that are equivalent according to the equivalence principle are\n(A) space and time. (B) a traveling twin and a stay-at-home twin. (C) gravity and acceleration. (D) mass and energy.\nA: Let's think step by step. Einstein\u2019s famous equivalence principle states that gravity and acceleration are equivalent. 
The answer is (C).\n\nQ: Which of these three elements has the most mass per nucleon?\n(A) Hydrogen (B) Iron (C) Uranium (D) Same in each\nA: Let's think step by step. Due to nuclear binding energy, the mass of an atomic nucleus is less than the sum of individual masses of the free constituent protons and neutrons; this is known as the mass defect. Hydrogen has no mass defect because it has only a single nucleon, so it will have the most mass per nucleon. The answer is (A).\n\nQ: A model airplane flies slower when flying into the wind and faster with wind at its back. When launched at right angles to the wind, in a cross wind, its groundspeed compared with flying in still air is\n(A) the same (B) greater (C) less (D) either greater or less depending on wind speed\nA: Let's think step by step. The plane\u2019s speed in the direction of the wind is greater than it would be in the absence of wind, and its speed orthogonal to the wind is the same as it would be in the absence of the wind. The total speed, which is these two components added in quadrature, is thus greater than the speed in still air. The answer is (B).", "econometrics": "The following are multiple choice questions (with answers) about econometrics.\n\nQ: Suppose now that a researcher wishes to use information criteria to determine the optimal lag length for a VAR. 500 observations are available for the bi-variate VAR, and the values of the determinant of the variance-covariance matrix of residuals are 0.0336, 0.0169, 0.0084, and 0.0062 for 1, 2, 3, and 4 lags respectively. What is the optimal model order according to Akaike's information criterion?\n(A) 1 lag (B) 2 lags (C) 3 lags (D) 4 lags\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s solve this problem step by step. First of all, let\u2019s recall that for a given set of data, Akaike's information criterion (AIC) allows us to measure how well a statistical model fits the data; it is an estimator of prediction error. Here in this problem we will need to use the formula ln(det(sigma_hat)) + (2 * k / T) to determine the values of Akaike\u2019s criterion, where ln denotes the natural log function, det the determinant function, k the total number of parameters (across both equations), and T the number of observations (which, in this case, is equal to 500). For 1 lag, the number of parameters in total is equal to 6; for 2 lags, it is 10; for 3 lags, it is 14; and for 4 lags, it is 18. Now, let\u2019s calculate the values of the criterion for each lag:\n(A) 1 lag: ln(0.0336) + (2 * 6 / 500) = ln(0.0336) + (12 / 500) = -3.369\n(B) 2 lags: ln(0.0169) + (2 * 10 / 500) = ln(0.0169) + (20 / 500) = -4.040\n(C) 3 lags: ln(0.0084) + (2 * 14 / 500) = ln(0.0084) + (28 / 500) = -4.724\n(D) 4 lags: ln(0.0062) + (2 * 18 / 500) = ln(0.0062) + (36 / 500) = -5.011\nBecause the optimal model order according to AIC minimizes the information criterion, the answer should be the one with the lowest value. In this case, (D) has the lowest value. The answer is (D).
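A quick check of the AIC arithmetic above, using the figures from the exemplar (variable names are ours):

    import math

    T = 500
    dets = {1: 0.0336, 2: 0.0169, 3: 0.0084, 4: 0.0062}   # det of residual covariance per lag
    n_params = {1: 6, 2: 10, 3: 14, 4: 18}                # parameters across both equations
    aic = {p: math.log(dets[p]) + 2 * n_params[p] / T for p in dets}
    print(aic)                    # {1: -3.369, 2: -4.040, 3: -4.724, 4: -5.011} (rounded)
    print(min(aic, key=aic.get))  # 4 lags, i.e. choice (D)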
\n\nQ: Consider the following AR(1) model with the disturbances having zero mean and unit variance\nyt = 0.2 + 0.4 yt-1 + ut\nThe (unconditional) mean of y will be given by\n(A) 0.2 (B) 0.4 (C) 0.5 (D) 0.33\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s solve this problem step by step. If we have an AR(1) model with the disturbances having zero mean and unit variance, then the unconditional mean of y is equal to the following:\nunconditional mean of y = (the intercept term) / (1 - autoregressive coefficient)\nWe know that the intercept term is 0.2 and the autoregressive coefficient is 0.4; thus, we have:\nunconditional mean of y = (0.2) / (1 - 0.4) = (0.2) / (0.6) = 2 / 6 = 1 / 3, which is approximately 0.33. That means that the answer should be (D) 0.33. The answer is (D).\n\nQ: What would then be the consequences for the OLS estimator if heteroscedasticity is present in a regression model but ignored?\n(A) It will be biased (B) It will be inconsistent (C) It will be inefficient (D) All of (a), (b) and (c) will be true.\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Heteroscedasticity refers to the condition where the variance of the error terms is not constant across multiple observations. If heteroscedasticity is present in a regression model, then the coefficient estimates of the OLS estimator will still be unbiased and consistent, but they will be inefficient. Because (A) and (B) are incorrect choices and (C) is a correct choice, (D) cannot be the right answer. Ultimately, (C) is the only true choice. The answer is (C).\n\nQ: Suppose that a test statistic has associated with it a p-value of 0.08. Which one of the following statements is true?\n(i) If the size of the test were exactly 8%, we would be indifferent between rejecting and not rejecting the null hypothesis\n(ii) The null would be rejected if a 10% size of test were used\n(iii) The null would not be rejected if a 1% size of test were used\n(iv) The null would be rejected if a 5% size of test were used.\n(A) (ii) and (iv) only (B) (i) and (iii) only (C) (i), (ii), and (iii) only (D) (i), (ii), (iii), and (iv).\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. Let\u2019s reason about each of the options.\n(i) is a true statement.\n(ii) is a true statement.\n(iii) is a true statement.\n(iv) is not a true statement. Thus, (i), (ii), and (iii) are true. The answer is (C).\n\nQ: For a stationary autoregressive process, shocks will\n(A) Eventually die away (B) Persist indefinitely (C) Grow exponentially (D) Never occur\nA: Let's think step by step. We refer to Wikipedia articles on econometrics for help. This is a question about stationary processes. For a stationary autoregressive process, shocks will eventually die away. The answer is (A).", "electrical_engineering": "The following are multiple choice questions (with answers) about electrical engineering.\n\nQ: A point pole has a strength of 4\u03c0 * 10^-4 weber. The force in newtons on a point pole of 4\u03c0 * 1.5 * 10^-4 weber placed at a distance of 10 cm from it will be\n(A) 15 N. (B) 20 N. (C) 7.5 N. (D) 3.75 N.\nA: Let's think step by step. The force between two point poles is given by m_1m_2/(mu_0 4 \\pi r^2), in analogy to Coulomb\u2019s law. Plugging in the values given in the question, we calculate that the force is approximately 15 N. The answer is (A).\n\nQ: The coil of a moving coil meter has 100 turns, is 40 mm long and 30 mm wide. The control torque is 240*10^-6 N-m on full scale. If the magnetic flux density is 1 Wb/m^2, the range of the meter is\n(A) 1 mA. (B) 2 mA. (C) 3 mA. (D) 4 mA.\nA: Let's think step by step. 
The torque on a coil in a uniform magnetic field is given by BANI, where B is the magnetic flux density, A is the area of the coil, N is the number of turns, and I is the current. So we have that I = (Torque)/(BAN), or 240e-6/(1200e-6 * 100 * 1) = 2e-3. The answer is (B).\n\nQ: In an SR latch built from NOR gates, which condition is not allowed\n(A) S=0, R=0 (B) S=0, R=1 (C) S=1, R=0 (D) S=1, R=1\nA: Let's think step by step. An SR latch is a set-reset latch; in the case where S=1 and R=1, the circuit has no stable state; instead a race condition will be produced within the circuit, so the device will be in an undefined state. So S=1, R=1 is an illegal input. The answer is (D).\n\nQ: Two long parallel conductors carry 100 A. If the conductors are separated by 20 mm, the force per meter of length of each conductor will be\n(A) 100 N. (B) 0.1 N. (C) 1 N. (D) 0.01 N.\nA: Let's think step by step. The magnetic force-per-length between two current-carrying conductors is given by \\mu_0 I_1 I_2 / (2 \\pi r), where $r$ is the separation distance and I_1 and I_2 are the currents. Plugging in 100 A for I_1 and I_2, and 20 mm for r, gives 0.1 N. The answer is (B).\n\nQ: In a 2 pole lap winding dc machine, the resistance of one conductor is 2\u03a9 and total number of conductors is 100. Find the total resistance\n(A) 200\u03a9 (B) 100\u03a9 (C) 50\u03a9 (D) 10\u03a9\nA: Let's think step by step. In lap winding, effectively two resistors are connected in parallel, so the actual resistance of each pair is 1 Ohm. Since we have 50 pairs, we get a total resistance of 50 Ohms. The answer is (C).", "elementary_mathematics": "The following are multiple choice questions (with answers) about elementary mathematics.\n\nQ: Olivia used the rule \"Add 11\" to create the number pattern shown below. 10, 21, 32, 43, 54. Which statement about the number pattern is true?\n(A) The 10th number in the pattern will be an even number.\n(B) The number pattern will never have two even numbers next to each other.\n(C) The next two numbers in the pattern will be an even number then an odd number.\n(D) If the number pattern started with an odd number then the pattern would have only odd numbers in it.\nA: Let's think step by step. Choice A is incorrect because every even-numbered term in the pattern is odd, and the 10th term is an even-numbered term. Choice B is correct, because adding an odd number (in this case 11) to an odd number produces an even number, and adding an odd number to an even number produces an odd number. Thus the terms in the pattern will alternate between odd and even, so there will never be two even numbers next to each other. Choice C is incorrect because the last term in the example is even (54), and we know that the terms will alternate between even and odd. Choice D is incorrect because the terms in the pattern will alternate between odd and even, regardless of the value of the first term. The answer is (B).\n\nQ: The population of the city where Michelle was born is 145,826. What is the value of the 5 in the number 145,826?\n(A) 5 thousands\n(B) 5 hundreds\n(C) 5 tens\n(D) 5 ones\nA: Let's think step by step. Choice A is correct, because there are three digits following the 5, so the 5 is in the thousands place. Thus the other choices are incorrect. The answer is (A).\n\nQ: A store sells 107 different colors of paint. They have 25 cans of each color in storage. The number of cans of paint the store has in storage can be found using the expression below. 107 \u00d7 25. 
How many cans of paint does the store have in storage?\n(A) 749\n(B) 2,675\n(C) 2,945\n(D) 4,250\nA: Let's think step by step. We can calculate 107 x 25 = (100 x 25) + (7 x 25) = 2500 + 175 = 2675. The answer is (B).\n\nQ: A total of 30 players will play basketball at a park. There will be exactly 5 players on each team. Which statement correctly explains how to find the number of teams needed?\n(A) Add 5 to 30 to find 35 teams.\n(B) Divide 30 by 5 to find 6 teams.\n(C) Multiply 30 and 5 to find 150 teams.\n(D) Subtract 5 from 30 to find 25 teams.\nA: Let's think step by step. We want to find the number of teams. We know that there are 5 players/team, and 30 players. Thus to get the number of teams we divide players by players/team, so 30 players / 5 players/team = 6 teams. The answer is (B).\n\nQ: Which expression is equivalent to 5 x 9?\n(A) (5 x 4) x (6 x 5)\n(B) (5 x 5) + (5 x 4)\n(C) (5 x 5) + (5 x 9)\n(D) (5 x 9) x (6 x 9)\nA: Let's think step by step. We know that 9 = (5 + 4), so 5 x 9 = 5 x (5 + 4) = (5 x 5) + (5 x 4). The answer is (B).", "formal_logic": "The following are multiple choice questions (with answers) about formal logic.\n\nQ: Which of the given formulas of PL is the best symbolization of the following sentence?\nTurtles live long lives and are happy creatures, unless they are injured.\n(A) (L \u2022 H) \u2261 I (B) (L \u2022 H) \u2228 I (C) L \u2022 (H \u2228 I) (D) L \u2022 (H \u2283 R).\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cL\u201d denote \u201cliving long\u201d, \u201cH\u201d \u201cbeing happy\u201d, and \u201cI\u201d \u201cbeing injured\u201d. Now, consider each choice:\n(A) means (living long AND being happy) is equivalent to (being injured). \n(B) means (living long AND being happy) OR (being injured). \n(C) means (living long) AND (being happy OR being injured). \n(D) means (living long) AND (being happy implies being R), but what R denotes is not clear.\nObviously, (B) is the best symbolization of the original sentence. The answer is (B).\n\nQ: Select the best translation into predicate logic. George borrows Hector's lawnmower. (g: George; h: Hector; l: Hector's lawnmower; Bxyx: x borrows y from z).\n(A) Blgh (B) Bhlg (C) Bglh (D) Bghl\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. We are told that \u201cBxyx\u201d means \u201cx borrows y from z\u201d. We can rewrite \u201cGeorge borrows Hector's lawnmower\u201d as \u201cGeorge borrows a lawnmower from Hector\u201d, which can then be translated into predicate logic as \u201cBglh\u201d. The answer \u201cBglh\u201d appears in (C); therefore, (C) must be the correct answer. The answer is (C).\n\nQ: Select the best English interpretation of the given arguments in predicate logic.\nDm\n(\u2200x)(Wx \u2283 ~Dx). \n(\u2200x)Wx \u2228 Ag\t/ (\u2203x)Ax\n(A) Marina is a dancer. Some weaklings are not dancers. Either everything is a weakling or Georgia plays volleyball. So something plays volleyball. (B) Marina is a dancer. No weakling is a dancer. Everything is either a weakling or plays volleyball. So something plays volleyball. (C) Marina is a dancer. Some weaklings are not dancers. Everything is either a weakling or plays volleyball. So something plays volleyball. (D) Marina is a dancer. No weakling is a dancer. Either everything is a weakling or Georgia plays volleyball. So something plays volleyball.\nA: Let's think step by step. 
We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cD\u201d denote \u201cbeing a dancer\u201d, \u201cm\u201d denote \u201cMarina\u201d, \u201cg\u201d denote \u201cGeorgia\u201d, \u201cW\u201d denote \u201cweakling\u201d, \u201cA\u201d denote \u201cplaying volleyball\u201d. Then, we have the following:\n1. Dm \u2192 Marina is a dancer.\n2. (\u2200x)(Wx \u2283 ~Dx). \u2192 For all x, if x is a weakling, then x is not a dancer. In other words, no weakling is a dancer.\n3. (\u2200x)Wx \u2228 Ag\t/ (\u2203x)Ax \u2192 Either everything is a weakling, or Georgia plays volleyball. So there exists an x that plays volleyball. \nOptions (A) and (C) do claim that some weaklings are not dancers, but the second argument strongly states that no weakling is a dancer. Thus, we can eliminate them. Option (B) omits the important detail about Georgia playing volleyball. Option (D) has all the details presented in the arguments and is the best English interpretation of the arguments. The answer is (D).\n\nQ: Select the best translation into predicate logic: No people drive on Mars.\n(A) ~Pd (B) (\u2200x)(Px \u2228 ~Dx) (C) (\u2200x)(Px \u2283 ~Dx) (D) ~Dp\nA: Let's think step by step. We refer to Wikipedia articles on formal logic for help. Let\u2019s solve this step by step. Let \u201cP\u201d denote \u201cbeing a person\u201d and \u201cD\u201d denote \u201cdriving on Mars\u201d. Then let\u2019s consider each option:\nOption (A): ~Pd \u2192 d is not a person.\nOption (B): (\u2200x)(Px \u2228 ~Dx) \u2192 For all x, x is a person or x does not drive on Mars.\nOption (C): (\u2200x)(Px \u2283 ~Dx) \u2192 For all x, x being a person implies that x does not drive on Mars.\nOption (D): ~Dp \u2192 p does not drive on Mars.\nOf all these options, Option (C) appears to be the best and most meaningful interpretation of the argument \u201cNo people drive on Mars.\u201d The answer is (C).", "global_facts": "The following are multiple choice questions (with answers) about global facts.\n\nQ: As of 2017, how many of the world\u2019s 1-year-old children today have been vaccinated against some disease?\n(A) 80% (B) 60% (C) 40% (D) 20%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. According to data published by the World Health Organization, the number of 1-year-old children vaccinated in 2017 exceeds 80%. The answer is (A).\n\nQ: As of 2019, about what percentage of Americans agree that the state is run for the benefit of all the people?\n(A) 31% (B) 46% (C) 61% (D) 76%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. In 2019, about 46% of Americans agree that the state is run for the benefit of all the people. The answer is (B).\n\nQ: As of 2019, about what percentage of Russians say it is very important to have free media in our country without government/state censorship?\n(A) 38% (B) 53% (C) 68% (D) 83%\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2019, about 38% of Russians say it is very important to have free media in our country. The answer is (A).\n\nQ: As of 2015, since 1990 forests have ____ in Europe and have ____ in Africa and the Americas.\n(A) increased, increased (B) increased, decreased (C) decreased, increased (D) decreased, decreased\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2015, since 1990 forests have increased in Europe and have decreased in Africa and the Americas. 
The answer is (B).\n\nQ: Which of the following pairs of statements are both true (as of 2019)?\n(A) People tend to be optimistic about their own future and the future of their nation or the world. (B) People tend to be optimistic about their own future but pessimistic about the future of their nation or the world. (C) People tend to be pessimistic about their own future but optimistic about the future of their nation or the world. (D) People tend to be pessimistic about their own future and the future of their nation or the world.\nA: Let's think step by step. We refer to Wikipedia articles on global facts for help. As of 2019, most people tend to be optimistic about their own future but pessimistic about the future of their nation or the world. The answer is (B).", "high_school_biology": "The following are multiple choice questions (with answers) about high school biology.\n\nQ: In animal cells, which of the following represents the most likely pathway that a secretory protein takes as it is synthesized in a cell?\n(A) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013rough ER (B) Ribosome\u2013Golgi apparatus\u2013rough ER\u2013secretory vesicle\u2013plasma membrane (C) Plasma membrane\u2013Golgi apparatus\u2013ribosome\u2013secretory vesicle\u2013rough ER (D) Ribosome\u2013rough ER\u2013Golgi apparatus\u2013secretory vesicle\u2013plasma membrane\nA: Let's think step by step. Protein synthesis starts at the ribosome, so we can eliminate (A) and (C). The ribosome that synthesizes a secretory protein is bound to the rough ER, and the protein moves from there to the Golgi apparatus, where it is modified and packaged into a vesicle. The vesicle then travels to the plasma membrane, where the protein is secreted. The answer is (D).\n\nQ: A mutation in a bacterial enzyme changed a previously polar amino acid into a nonpolar amino acid. This amino acid was located at a site distant from the enzyme\u2019s active site. How might this mutation alter the enzyme\u2019s substrate specificity?\n(A) By changing the enzyme\u2019s pH optimum (B) By changing the enzyme\u2019s location in the cell (C) By changing the shape of the protein (D) An amino acid change away from the active site cannot alter the enzyme\u2019s substrate specificity.\nA: Let's think step by step. A change in an amino acid leads to a change in the primary structure of the protein. A change in the primary structure may lead to a change in the secondary and the tertiary structure of the protein. A change in the tertiary structure means a change in the shape of the protein, so (C) has to be correct. Since the change does not affect the active site of the enzyme, we do not expect the activity of the enzyme to be affected. The answer is (C).\n\nQ: Which of the following is not a way to form recombinant DNA?\n(A) Translation (B) Conjugation (C) Specialized transduction (D) Transformation\nA: Let's think step by step. The introduction of foreign DNA or RNA into bacteria or eukaryotic cells is a common technique in molecular biology and scientific research. There are multiple ways foreign DNA can be introduced into cells including transformation, transduction, conjugation, and transfection. In contrast, (A) is not a way to form recombinant DNA: during translation the ribosomes synthesize proteins from RNA. The answer is (A).\n\nQ: Homologous structures are often cited as evidence for the process of natural selection. 
All of the following are examples of homologous structures EXCEPT\n(A) the wings of a bird and the wings of a bat (B) the flippers of a whale and the arms of a man (C) the pectoral fins of a porpoise and the flippers of a seal (D) the forelegs of an insect and the forelimbs of a dog\nA: Let's think step by step. Homologous structures are similar physical features in organisms that share a common ancestor but may serve different functions. Comparisons (B) and (C) are clearly homologous because they share a common ancestor and the structures serve different purposes. Bat wings and bird wings are also homologous; although both are wings, they derive from the forelimbs of a common vertebrate ancestor. Insects and dogs are only very distantly related, since one is an invertebrate while the other is a vertebrate, and their forelegs and forelimbs do not derive from a shared ancestral structure, so they are not homologous. The answer is (D).\n\nQ: Which of the following is not known to be involved in the control of cell division?\n(A) Cyclins (B) Protein kinases (C) Checkpoints (D) Fibroblast cells\nA: Let's think step by step. Normal cells move through the cell cycle in a regulated way. At the checkpoint stage, they use information about their own internal state and cues from the environment around them to decide whether to proceed with cell division. Cues like these act by changing the activity of core cell cycle regulators inside the cell. The most common regulators are cyclins and cyclin-dependent kinases. Fibroblast cells do not play any role in cell division. The answer is (D).", "high_school_chemistry": "The following are multiple choice questions (with answers) about high school chemistry.\n\nQ: Which of the following is considered an acid anhydride?\n(A) HCl (B) H2SO3 (C) SO2 (D) Al(NO3)3\nA: Let's think step by step. An acid anhydride is a compound that is derived by removing water from an acid. The chemical formula for water is H2O, which means that we need to determine which of these options, when combined with H2O, forms an acid. SO2, or sulfur dioxide, when combined with H2O, makes H2SO3, or sulfurous acid. The answer is (C).\n\nQ: Which of the following is expected to be a polar molecule?\n(A) PCl4F (B) BF3 (C) CO2 (D) Si(CH3)4\nA: Let's think step by step. A polar molecule is one that has a slightly positive charge on one end of the molecule and a slightly negative charge on the other end. Boron trifluoride (BF3) has Boron as the center atom and three fluorine atoms attached to it; it is trigonal planar and symmetric, so it is nonpolar. Carbon Dioxide (CO2) has Carbon as the central atom with double bonds to two Oxygen atoms - this is also symmetrical and therefore nonpolar. The same is the case for tetramethyl silane (Si(CH3)4), which is a Silicon atom surrounded by four methyl groups. The structure of PCl4F is that Phosphorus is the central atom, attached to four chlorines and one fluorine atom. This is asymmetrical, and therefore has a net dipole and is expected to be a polar molecule. The answer is (A).\n\nQ: From the solubility rules, which of the following is true?\n(A) All chlorides, bromides, and iodides are soluble (B) All sulfates are soluble (C) All hydroxides are soluble (D) All ammonium-containing compounds are soluble\nA: Let's think step by step. The chlorides, bromides, and iodides of lead, silver, and mercury are not soluble in water. This rules out (A). The sulfates of lead, barium, and calcium are not soluble in water, which rules out (B). 
The hydroxides of any metal besides sodium, potassium, ammonium, calcium, and barium are insoluble. This rules out (C). Typically ammonium ions indicate a soluble ionic substance. The answer is (D).\n\nQ: A new compound is synthesized and found to be a monoprotic acid with a molar mass of 248 g/mol. When 0.0050 mol of this acid are dissolved in 0.500 L of water, the pH is measured as 3.89. What is the pKa of this acid?\n(A) 3.89 (B) 7.78 (C) 5.78 (D) 2.33\nA: Let's think step by step. Recall that $[A^{-}] = [H^{+}]$. Here, this is equal to $10^{-3.89}$. Then we have $K_{a} = \\frac{[H^{+}][A^{-}]}{[HA]} = \\frac{10^{-3.89} \\cdot 10^{-3.89}}{10^{-2}}$. The resulting exponent is $-3.89 + (-3.89) - (-2) = -5.78$, therefore $K_a = 10^{-5.78}$. The $pK_a$ is the negative log of $K_a$, which is equal to $5.78$. The answer is (C).
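The pKa arithmetic above can be verified directly in Python (variable names are ours):

    import math

    h = 10 ** -3.89          # [H+] = [A-], from the measured pH
    ha = 0.0050 / 0.500      # [HA] is approximately 0.010 M
    ka = h * h / ha
    print(-math.log10(ka))   # prints approximately 5.78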
\n\nQ: A solution contains 2.00 mole of acetic acid, CH3COOH, and 1.00 mole of calcium acetate, Ca(CH3COO)2. The solution is able to resist the addition of a small amount of strong acid or strong base with only minor changes in the pH of the solution. Larger quantities of strong acid or strong base can cause a significant change in pH. How many moles of nitric acid, HNO3, may be added before the pH begins to change significantly?\n(A) 0.500 mole (B) 1.00 mole (C) 2.00 mole (D) 3.00 mole\nA: Let's think step by step. We would like to compute the buffer capacity of this solution. First we write the equation for the ionization of the weak acid, in this case of acetic acid. $CH_{3}COOH (aq) + H_{2}O \\rightarrow H_{3}O^{+} + CH_{3}COO^{-}$. The conjugate base is therefore the acetate ion. The added strong acid, nitric acid, will react with the conjugate base. Therefore the maximum amount of acid that can be added will be equal to the amount of acetate ion, or 2 moles. The answer is (C).", "high_school_computer_science": "The following are multiple choice questions (with answers) about high school computer science.\n\nQ: Which of the following is an example of the use of a device on the Internet of Things (IoT) ?\n(A) A car alerts a driver that it is about to hit an object. (B) A hiker uses a GPS watch to keep track of her position. (C) A refrigerator orders milk from an online delivery service when the milk in the refrigerator is almost gone. (D) A runner uses a watch with optical sensors to monitor his heart rate.\nA: Let's think step by step. The term Internet of Things (IoT) refers to common devices which are connected to the internet, enabling new functionality. Choice A is incorrect because it does not describe an internet-connected device. In choice B, the watch is only described as having GPS functionality but no internet connectivity. Choice C describes a common device (a refrigerator) which has internet connectivity enabling new functionality (online ordering). Choice D does not mention internet connectivity for the watch, only optical sensors. The answer is (C).\n\nQ: Many Web browsers allow users to open anonymous windows. During a browsing session in an anonymous window, the browser does not record a browsing history or a list of downloaded files. When the anonymous window is exited, cookies created during the session are deleted. Which of the following statements about browsing sessions in an anonymous window is true?\n(A) The activities of a user browsing in an anonymous window will not be visible to people who monitor the user's network, such as the system administrator. (B) Items placed in a Web store's shopping cart for future purchase during the anonymous browsing session will not be saved on the user's computer. (C) A user will not be able to log in to e-mail or social media accounts during the anonymous browsing session. (D) A user browsing in an anonymous window will be protected from viruses launched from any web sites visited or files downloaded.\nA: Let's think step by step. Choice A is incorrect as it only describes network traffic, which an anonymous browser does not change. Choice B is correct as it correctly describes how an anonymous browser will prevent saving data on the user\u2019s computer after the session is ended. Choice C is incorrect because an anonymous browser will not prevent logging in to email or social media accounts. Choice D is incorrect because an anonymous browser in itself performs no virus protection. The answer is (B).\n\nQ: In the program below, the initial value of X is 5 and the initial value of Y is 10.\nIF (X < 0){\n DISPLAY (\"Foxtrot\")\n} ELSE {\n IF (X > Y){\n DISPLAY (\"Hotel\")\n } ELSE {\n IF (Y > 0){\n DISPLAY (\"November\")\n } ELSE {\n DISPLAY (\"Yankee\")\n }\n }\n}\nWhat is displayed as a result of running the program?\n(A) Foxtrot (B) Hotel (C) November (D) Yankee\nA: Let's think step by step. Because X has the value 5, the first conditional IF (X < 0) is false, so we move to the first ELSE clause. Because X is 5 and Y is 10, the second conditional IF (X > Y) is false, so we move to the following ELSE clause. Since Y is 10, the conditional IF (Y > 0) is true, so the command DISPLAY (\"November\") is executed. The answer is (C).\n\nQ: What is the output of \"abc\"[::-1] in Python 3?\n(A) Error (B) abc (C) cba (D) c\nA: Let's think step by step. We know that the slicing operator [::-1] takes all of the elements in the string in reverse order, so we reverse the order of the string \"abc\", resulting in \"cba\". The answer is (C).\n\nQ: A list of numbers has n elements, indexed from 1 to n. The following algorithm is intended to display the number of elements in the list that have a value greater than 100. The algorithm uses the variables count and position. Steps 3 and 4 are missing.\n Step 1: Set count to 0 and position to 1.\n Step 2: If the value of the element at index position is greater than 100, increase the value of count by 1.\n Step 3: (missing step)\n Step 4: (missing step)\n Step 5: Display the value of count.\nWhich of the following could be used to replace steps 3 and 4 so that the algorithm works as intended?\n(A) Step 3: Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the value of count is greater than 100.\n(B) Step 3: Increase the value of position by 1.\n Step 4: Repeat steps 2 and 3 until the value of position is greater than n.\n(C) Step 3: Repeat step 2 until the value of count is greater than 100.\n Step 4: Increase the value of position by 1.\n(D) Step 3: Repeat step 2 until the value of position is greater than n.\n Step 4: Increase the value of count by 1.\nA: Let's think step by step. Choice A is incorrect, because its Step 4 has an incorrect termination condition, stopping when count is greater than 100. We need to stop after inspecting all elements in the list. Choice B is correct because it correctly increments both count and position, and correctly repeats these steps and terminates when all elements in the list have been inspected. 
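As an aside, choice (B)'s loop is straightforward to transcribe and test in Python; the function name and the sample list below are ours, not part of the question:

    def count_greater_than_100(values):
        # Step 1: set count to 0 and position to 1 (1-indexed, as in the question).
        count, position = 0, 1
        # Steps 2-4 (choice B): test the element at position, advance position,
        # and repeat until position exceeds n.
        while position <= len(values):
            if values[position - 1] > 100:  # Python lists are 0-indexed
                count += 1
            position += 1
        # Step 5: display the value of count.
        return count

    assert count_greater_than_100([99, 101, 250, 7]) == 2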
Choice C is incorrect because it incorrectly increments the variable count until its value is greater than 100, regardless of the elements in the list. Choice D is incorrect because its step 3 does not increment the value of position, so it will repeat forever. The answer is (B).", "high_school_european_history": "The following are multiple choice questions (with answers) about high school european history.\n\nQ: This question refers to the following information.\nAlbeit the king's Majesty justly and rightfully is and ought to be the supreme head of the Church of England, and so is recognized by the clergy of this realm in their convocations, yet nevertheless, for corroboration and confirmation thereof, and for increase of virtue in Christ's religion within this realm of England, and to repress and extirpate all errors, heresies, and other enormities and abuses heretofore used in the same, be it enacted, by authority of this present Parliament, that the king, our sovereign lord, his heirs and successors, kings of this realm, shall be taken, accepted, and reputed the only supreme head in earth of the Church of England, called Anglicans Ecclesia; and shall have and enjoy, annexed and united to the imperial crown of this realm, as well the title and style thereof, as all honors, dignities, preeminences, jurisdictions, privileges, authorities, immunities, profits, and commodities to the said dignity of the supreme head of the same Church belonging and appertaining; and that our said sovereign lord, his heirs and successors, kings of this realm, shall have full power and authority from time to time to visit, repress, redress, record, order, correct, restrain, and amend all such errors, heresies, abuses, offenses, contempts, and enormities, whatsoever they be, which by any manner of spiritual authority or jurisdiction ought or may lawfully be reformed, repressed, ordered, redressed, corrected, restrained, or amended, most to the pleasure of Almighty God, the increase of virtue in Christ's religion, and for the conservation of the peace, unity, and tranquility of this realm; any usage, foreign land, foreign authority, prescription, or any other thing or things to the contrary hereof notwithstanding.\nEnglish Parliament, Act of Supremacy, 1534\nFrom the passage, one may infer that the English Parliament wished to argue that the Act of Supremacy would\n(A) give the English king a new position of authority (B) give the position of head of the Church of England to Henry VIII alone and exclude his heirs (C) establish Calvinism as the one true theology in England (D) end various forms of corruption plaguing the Church in England\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The Act of Supremacy states that it grants authority to the king \"to repress and extirpate all errors, heresies, and other enormities and abuses\", referring to the corruption in the Church of England. The answer is (D).\n\nQ: This question refers to the following information.\nRead the following excerpt.\nThe revolutionary seed had penetrated into every country and spread more or less. It was greatly developed under the r\u00e9gime of the military despotism of Bonaparte. 
His conquests displaced a number of laws, institutions, and customs; broke through bonds sacred among all nations, strong enough to resist time itself; which is more than can be said of certain benefits conferred by these innovators.\nThe monarchs will fulfil the duties imposed upon them by Him who, by entrusting them with power, has charged them to watch over the maintenance of justice, and the rights of all, to avoid the paths of error, and tread firmly in the way of truth. Placed beyond the passions which agitate society, it is in days of trial chiefly that they are called upon to despoil realities of their false appearances, and to show themselves as they are, fathers invested with the authority belonging by right to the heads of families, to prove that, in days of mourning, they know how to be just, wise, and therefore strong, and that they will not abandon the people whom they ought to govern to be the sport of factions, to error and its consequences, which must involve the loss of society.\nUnion between the monarchs is the basis of the policy which must now be followed to save society from total ruin. . . .\nLet them not confound concessions made to parties with the good they ought to do for their people, in modifying, according to their recognized needs, such branches of the administration as require it.\nLet them be just, but strong; beneficent, but strict.\nLet them maintain religious principles in all their purity, and not allow the faith to be attacked and morality interpreted according to the social contract or the visions of foolish sectarians.\nLet them suppress Secret Societies; that gangrene of society.\n\u2014Klemens von Metternich, Political Confession of Faith, 1820\nWhich of the following was the greatest cause of the fears expressed by Metternich in the document above?\n(A) The ideas of personal liberty and nationalism conceived during the Enlightenment resulted in radical revolutions that could spread throughout Europe. (B) The conquest of Europe by Napoleon led to the creation of new factions and shifted the European balance of power. (C) The power of monarchs had grown to the point where it needed to be checked by other powers within each nation or domination of civilians would occur. (D) The rising and falling economic cycle of the newly emerging capitalist economy could lead to civilian unrest that must be suppressed.\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The fears of revolution in early 19th century Europe expressed by Klemens von Metternich, a conservative Austrian statesman, were a direct result of the age of Enlightenment, a period of European history where the absolute power of the monarchy was challenged with ideas of individual liberty and nationalism, leading to the French revolution and its effects all over Europe. 
The answer is (A).\n\nQ: This question refers to the following information.\nThe excerpts below are from the Navigation Acts of 1651.\n[A]fter the first day of December, one thousand six hundred fifty and one, and from thence forwards, no goods or commodities whatsoever of the growth, production or manufacture of Asia, Africa or America, or of any part thereof; or of any islands belonging to them, or which are described or laid down in the usual maps or cards of those places, as well of the English plantations as others, shall be imported or brought into this Commonwealth of England, or into Ireland, or any other lands, islands, plantations, or territories to this Commonwealth belonging, or in their possession, in any other ship or ships, vessel or vessels whatsoever, but only in such as do truly and without fraud belong only to the people of this Commonwealth, or the plantations thereof, as the proprietors or right owners thereof; and whereof the master and mariners are also of the people of this Commonwealth, under the penalty of the forfeiture and loss of all the goods that shall be imported contrary to this act. . . .\n[N]o goods or commodities of the growth, production, or manufacture of Europe, or of any part thereof, shall after the first day of December, one thousand six hundred fifty and one, be imported or brought into this Commonwealth of England, or any other lands or territories to this Commonwealth belonging, or in their possession, in any ship or ships, vessel or vessels whatsoever, but in such as do truly and without fraud belong only to the people of this Commonwealth, and in no other, except only such foreign ships and vessels as do truly and properly belong to the people of that country or place, of which the said goods are the growth, production or manufacture.\nWhich of the following best describes the outcome of the Navigation Acts of 1651?\n(A) They served as a catalyst for the growth of English shipping and overseas trade, but did little to limit the prospects of the Dutch in the seventeenth century. (B) They brought about almost immediate hardships for the Dutch economy as their dominance of overseas trade quickly ended. (C) They were rescinded during the restoration of the Stuarts as they sought normal diplomatic relations with the Dutch so as not to need Parliament's financial support for war. (D) They led to nearly a century of recurrent war between England and the Netherlands, which would not end until after American independence.\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The Navigation Acts of 1651 helped English shipping by restricting the ability of ships from other European countries, especially the Dutch, to transport goods from colonies in Asia and Africa into England. The answer is (A).\n\nQ: This question refers to the following information.\nIn Russia there was nothing going on well, and [Souvarine] was in despair over the news he had received. His old companions were all turning to the politicians; the famous Nihilists who made Europe tremble-sons of village priests, of the lower middle class, of tradesmen-could not rise above the idea of national liberation, and seemed to believe that the world would be delivered-when they had killed their despot\u2026\n\"Foolery! They'll never get out of it with their foolery.\"\nThen, lowering his voice still more, in a few bitter words he described his old dream of fraternity. 
He had renounced his rank and his fortune; he had gone among workmen, only in the hope of seeing at last the foundation of a new society of labour in common. All the sous in his pockets had long gone to the urchins of the settlement; he had been as tender as a brother with the colliers, smiling at their suspicion, winning them over by his quiet workmanlike ways and his dislike of chattering. But decidedly the fusion had not taken place.\nHis voice changed, his eyes grew bright, he fixed them on \u00c9tienne, directly addressing him:\n\"Now, do you understand that? These hatworkers at Marseilles who have won the great lottery prize of a hundred thousand francs have gone off at once and invested it, declaring that they are going to live without doing anything! Yes, that is your idea, all of you French workmen; you want to unearth a treasure in order to devour it alone afterwards in some lazy, selfish corner. You may cry out as much as you like against the rich, you haven't got courage enough to give back to the poor the money that luck brings you. You will never be worthy of happiness as long as you own anything, and your hatred of the bourgeois proceeds solely from an angry desire to be bourgeois yourselves in their place.\"\n\u00c9mile Zola, French writer, Germinal, 1885\nThe passage displays the direct concern for the welfare of the working classes that was typically a part of which movement?\n(A) Capitalist (B) Scientific (C) Communist (D) Existentialist\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. The modern Communist movement aims to establish a classless society based on communal ownership and distribution of property and means of production, thereby especially benefiting the working classes. The answer is (C).\n\nQ: This question refers to the following information.\nThe following excerpt is from a pamphlet.\nYou will do me the justice to remember, that I have always strenuously supported the Right of every man to his own opinion, however different that opinion might be to mine. He who denies to another this right, makes a slave of himself to his present opinion, because he precludes himself the right of changing it.\nThe most formidable weapon against errors of every kind is Reason. I have never used any other, and I trust I never shall.\nThe circumstance that has now taken place in France of the total abolition of the whole national order of priesthood, and of everything appertaining to compulsive systems of religion, and compulsive articles of faith, has not only precipitated my intention, but rendered a work of this kind exceedingly necessary, lest in the general wreck of superstition, of false systems of government, and false theology, we lose sight of morality, of humanity, and of the theology that is true.\nI believe in one God, and no more; and I hope for happiness beyond this life.\nI believe in the equality of man; and I believe that religious duties consist in doing justice, loving mercy, and endeavoring to make our fellow-creatures happy.\nI do not believe in the creed professed by the Jewish church, by the Roman church, by the Greek church, by the Turkish church, by the Protestant church, nor by any church that I know of. 
My own mind is my own church.\nAll national institutions of churches, whether Jewish, Christian or Turkish, appear to me no other than human inventions, set up to terrify and enslave mankind, and monopolize power and profit.\nI do not mean by this declaration to condemn those who believe otherwise; they have the same right to their belief as I have to mine.\n\u2014Thomas Paine, The Age of Reason, 1794\u20131795\nWhich of the following Enlightenment philosophes designed a system of checks and balances for government to avoid abuses of power?\n(A) Jean Jacques Rousseau (B) Baron Montesquieu (C) Mary Wollstonecraft (D) Adam Smith\nA: Let's think step by step. We refer to Wikipedia articles on european history for help. Baron Montesquieu was an 18th-century French philosopher who wrote extensively against the monopolization of power and advocated for a system of checks and balances in government to prevent the rise of despotism. The answer is (B).", "high_school_geography": "The following are multiple choice questions (with answers) about high school geography.\n\nQ: Which one of the following items is an example of nonmaterial culture?\n(A) Dove soap (B) Dove candy bar (C) Dove symbol (D) A dove (bird).\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. Nonmaterial culture consists of cultural ideas, beliefs or symbols that are not physical objects. The answer is (C).\n\nQ: During the third stage of the demographic transition model, which of the following is true?\n(A) Birth rates increase and population growth rate is less rapid. (B) Birth rates decline and population growth rate is less rapid. (C) Birth rates increase and population growth rate increases. (D) Birth rates decrease and population growth rate increases.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. The demographic transition model describes the five different stages of population growth as a country goes through economic development, where the third stage refers to a period of declining birth rates and lower population growth. The answer is (B).\n\nQ: The practice of hiring a foreign third-party service provider to run an operation is called\n(A) outsourcing. (B) offshoring. (C) maquiladoras. (D) locational interdependence.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. \"Offshoring\" literally means to move or base some of the activities or processes of a company to a foreign country. The answer is (B).\n\nQ: Which of the following statements is NOT accurate regarding the services provided by local governments in the United States?\n(A) Duplication of efforts occurs often. (B) Social problems of the central city spill over into the surrounding residential suburbs. (C) Inefficiency in providing services occurs often. (D) One neighborhood's efforts to reduce pollution are always supported by neighboring communities.\nA: Let's think step by step. We refer to Wikipedia articles on geography for help. There may be economic, social or political reasons for two neighboring communities and their local governments not agreeing to pollution reduction efforts initiated by one of them. The answer is (D).\n\nQ: The rate of natural increase of a population is found by subtracting the\n(A) crude death rate from the crude birth date. (B) crude birth rate from the crude death rate. (C) doubling time from the crude birth rate. (D) fertility rate from the crude death rate.\nA: Let's think step by step.
We refer to Wikipedia articles on geography for help. The difference between the number of births and deaths gives the population increase at any given time. The answer is (A).", "high_school_government_and_politics": "The following are multiple choice questions (with answers) about high school government and politics.\n\nQ: Which of the following best states an argument made by James Madison in The Federalist number 10?\n(A) Honest politicians can prevent factions from developing. (B) Factions are more likely to occur in large republics than in small ones. (C) The negative effects of factionalism can be reduced by a republican government. (D) Free elections are the people's best defense against factionalism.\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. In the Federalist number 10, James Madison advocated for a representative republican form of government to guard against factionalism. The answer is (C).\n\nQ: The term \"budget deficit\" refers to the\n(A) annual increase in federal spending on the military (B) amount of interest on the national debt (C) difference between the initial budget proposals made by the president and Congress (D) amount the government spends in excess of its revenues\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. When the government spends more than it earns, the difference is the budget deficit. The answer is (D).\n\nQ: Which of the following statements about cabinet departments is FALSE?\n(A) They are established by the legislative branch. (B) Their members often don't have much influence over presidential decisions. (C) They cannot all be run by leaders who belong to the same political party the president does. (D) Not every federal agency is a cabinet department.\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. There is no law stipulating that some cabinet department leaders have to belong to a political party different from that of the president. The answer is (C).\n\nQ: Which of the following cases established the precedent that a defendant must be informed of the right to remain silent, the right to a lawyer, and protection from self-incrimination?\n(A) Weeks v. United States (B) Betts v. Brady (C) Mapp v. Ohio (D) Miranda v. Arizona\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. In the landmark Miranda v. Arizona in 1966, the US Supreme Court, based on the Fifth and Sixth Amendments of the US Constitution, guaranteed a defendant's right to an attorney and protection from self-incrimination. The answer is (D).\n\nQ: Uncertainty over the limits to presidential power is caused primarily by the fact that\n(A) the constitutional definition of those powers is broad and unspecific (B) most people agree that the Constitution places too many limits on presidential power (C) the Supreme Court consistently refuses to rule on cases concerning presidential powers (D) constitutional amendments have greatly increased presidential powers\nA: Let's think step by step. We refer to Wikipedia articles on government and politics for help. The US Constitution is not very specific about the powers of the president, leading to uncertainty over its limits.
The answer is (A).", "high_school_macroeconomics": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\nQ: Which of the following policies best describes supply-side fiscal policy?\n(A) An increase in the money supply (B) Increased government spending (C) Lower taxes on research and development of new technology (D) Higher taxes on household income\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. Supply-side fiscal policy stimulates the economy by encouraging more production of goods and services through reduction in taxes and deregulation. The answer is (C).\n\nQ: The short-run Phillips curve indicates a\n(A) direct relation between unemployment and inflation (B) direct relation between price and quantity demanded (C) inverse relation between price and quantity demanded (D) inverse relation between unemployment and inflation\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. The short-run Phillips curve shows that whenever unemployment decreases below a natural level, the inflation starts increasing, and vice-versa. The answer is (D).\n\nQ: Holding all else equal which of the following monetary policies would be used to boost U.S. exports?\n(A) Increasing the discount rate (B) Increasing the reserve ratio (C) Buying government securities (D) Lowering tariffs\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. Buying government securities leads to reduction in demand for US dollars from foreign buyers, thereby making it cheaper and hence making US exports more attractive. The answer is (C).\n\nQ: A federal deficit occurs when\n(A) exports exceed imports. (B) imports exceed exports. (C) federal tax collections exceed spending. (D) federal spending exceeds federal tax revenues.\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. A federal deficit occurs when federal spending exceeds federal income which is primarily from tax revenues. The answer is (D).\n\nQ: Which of the following is not included in the U.S. GDP?\n(A) The U.S. military opens a new base in a foreign country with 1000 U.S. personnel. (B) Japanese consumers buy thousands of CDs produced in the United States. (C) An American pop singer performs a sold-out concert in Paris. (D) A French theatrical production tours dozens of American cities.\nA: Let's think step by step. We refer to Wikipedia articles on macroeconomics for help. The economic transactions related to the performance of the American pop-singer in Paris happens entirely outside the U.S. and hence is not included in the GDP numbers. The answer is (C).", "high_school_mathematics": "The following are multiple choice questions (with answers) about high school mathematics.\n\nQ: Simplify and write the result with a rational denominator: $$\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}$$\n(A) \\frac{3\\sqrt{3}}{3} (B) \\frac{1}{3} (C) \\sqrt{3} (D) \\frac{\\sqrt{3}}{3}\nA: Let's think step by step. Factoring $729=3^6$ and combining the roots $\\frac{1}{2}\\frac{1}{3}\\frac{1}{2}=\\frac{1}{12}$, we get that $\\sqrt{\\sqrt[3]{\\sqrt{\\frac{1}{729}}}}=\\left(\\frac{1}{3^6}\\right)^{\\frac{1}{12}}=\\frac{1}{3^{\\frac{1}{2}}}=\\frac{3}{\\sqrt{3}}$ The answer is (D).\n\nQ: Five thousand dollars compounded annually at an $x\\%$ interest rate takes six years to double. 
At the same interest rate, how many years will it take $\$300$ to grow to $\$9600$?\n(A) 12 (B) 1 (C) 30 (D) 5\nA: Let's think step by step. To go from $\$300$ to $\$9600$, the value must go up by a factor of $9600/300=32=2^5$. Since at this interest rate it takes six years for it to double, it will take $5*6=30$ years to grow to $\$9600$. The answer is (C).\n\nQ: Ten students take a biology test and receive the following scores: 45, 55, 50, 70, 65, 80, 40, 90, 70, 85. What is the mean of the students\u2019 test scores?\n(A) 55 (B) 60 (C) 62 (D) 65\nA: Let's think step by step. There are 10 students and the sum of their scores is $45 + 55 + 50 + 70 + 65 + 80 + 40 + 90 + 70 + 85 = 650$, so the mean is $650/10=65$. The answer is (D).\n\nQ: The variable $x$ varies directly as the square of $y$, and $y$ varies directly as the cube of $z$. If $x$ equals $-16$ when $z$ equals 2, what is the value of $x$ when $z$ equals $\\frac{1}{2}$?\n(A) -1 (B) 16 (C) -\\frac{1}{256} (D) \\frac{1}{16}\nA: Let's think step by step. We know that $x \\propto y^2$ and $y \\propto z^3$, so $x = k z^6$ for some constant $k$. Plugging in for $x=-16$ and $z=2$, the constant value is $k=\\frac{x}{z^6}=\\frac{-16}{64}=-\\frac{1}{4}$. So, when $z=\\frac{1}{2}$, the value of $x$ is $x=kz^6=-\\frac{1}{4}\\frac{1}{2^6}=-\\frac{1}{256}$. The answer is (C).\n\nQ: Joe was in charge of lights for a dance. The red light blinks every two seconds, the yellow light every three seconds, and the blue light every five seconds. If we include the very beginning and very end of the dance, how many times during a seven minute dance will all the lights come on at the same time? (Assume that all three lights blink simultaneously at the very beginning of the dance.)\n(A) 3 (B) 15 (C) 6 (D) 5\nA: Let's think step by step. The least common multiple of 2, 3 and 5 is 30 seconds, i.e. twice per minute, so during a 7 minute dance all three lights will come on at the same time $2*7+1=15$ times. The answer is (B).", "high_school_microeconomics": "The following are multiple choice questions (with answers) about high school microeconomics.\n\nQ: Which of the following is necessarily a characteristic of oligopoly?\n(A) Free entry into and exit from the market (B) A few large producers (C) One producer of a good with no close substitutes (D) A homogenous product\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. An oligopoly is when a market is dominated by a small number of large sellers or producers. An oligopolistic market has high barriers to new entry, and the products may be differentiated. The answer is (B).\n\nQ: If the government subsidizes producers in a perfectly competitive market, then\n(A) the demand for the product will increase (B) the demand for the product will decrease (C) the consumer surplus will increase (D) the consumer surplus will decrease\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. (A) and (B) are wrong because the demand curve does not change at all. If the government subsidizes producers, the supply will increase, and thus the consumer surplus also increases. The answer is (C).\n\nQ: Which of the following is true of a price floor?\n(A) The price floor shifts the demand curve to the left. (B) An effective floor creates a shortage of the good. (C) The price floor shifts the supply curve of the good to the right. (D) To be an effective floor, it must be set above the equilibrium price.\nA: Let's think step by step.
We refer to Wikipedia articles on microeconomics for help. A price floor does not shift the demand or supply curve. An effective price floor must be set above the equilibrium price; otherwise it is not binding and has no effect. The answer is (D).\n\nQ: The concentration ratio for a monopoly is\n(A) 0 (B) 5 (C) 10 (D) 100\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. The concentration ratio is calculated as the sum of the market shares of a given number of the largest companies. Monopoly means one company or entity controls the entire market; therefore, the concentration ratio is 100 percent. The answer is (D).\n\nQ: In a competitive labor market for housepainters, which of the following would increase the demand for housepainters?\n(A) An effective minimum wage imposed on this labor market. (B) An increase in the price of gallons of paint. (C) An increase in the construction of new houses. (D) An increase in the price of mechanical painters so long as the output effect exceeds the substitution effect.\nA: Let's think step by step. We refer to Wikipedia articles on microeconomics for help. An increase in the construction of new houses means an increased demand for house painting, which increases the demand for housepainters. The answer is (C).", "high_school_physics": "The following are multiple choice questions (with answers) about high school physics.\n\nQ: A microwave oven is connected to an outlet, 120 V, and draws a current of 2 amps. At what rate is energy being used by the microwave oven?\n(A) 10 W (B) 30 W (C) 60 W (D) 240 W\nA: Let's think step by step. Rate of energy usage is known as power; in a dissipative electrical circuit, power is given by voltage times current. So in our case, the power is 120 V times 2 amps, or 240 W. The answer is (D).\n\nQ: A point charge, Q = +1 mC, is fixed at the origin. How much work is required to move a charge, Q = +8 \u00b5C, from the point (0, 4 meters) to the point (3 meters, 0)?\n(A) 3.5 J (B) 6.0 J (C) 22.5 J (D) 40 J\nA: Let's think step by step. To calculate the work required to move a charge from one location to another in a fixed electric field, it is enough to calculate the potential difference between the two locations. Here, the potential only depends on the distance between the charges; it\u2019s $k q_1 q_2 / r$, where $k$ is Coulomb\u2019s constant. Plugging in values $q_1 = $ 1 mC, $q_2 = 8 \\mu$ C, and the distances $r_1 = 4$ m and $r_2 = 3$ m, the work is $k q_1 q_2 (1/r_2 - 1/r_1) \\approx 5.992$ J, which rounds to 6 J. The answer is (B).\n\nQ: Which of the following conditions will ensure that angular momentum is conserved? I. Conservation of linear momentum II. Zero net external force III. Zero net external torque\n(A) I and II only (B) I and III only (C) II and III only (D) III only\nA: Let's think step by step. Torque is the rate of change of angular momentum; if there is zero net external torque, angular momentum is conserved. The answer is (D).\n\nQ: A photocell of work function \u03d5 = 2eV is connected to a resistor in series. Light of frequency f = 1 \u00d7 10^15 Hz hits a metal plate of the photocell. If the power of the light is P = 100 W, what is the current through the resistor?\n(A) 2:00 AM (B) 6:00 AM (C) 12:00 AM (D) 24 A\nA: Let's think step by step. The only answer above which has units of current is D, 24 A. The answer is (D).\n\nQ: A pipe full of air is closed at one end. A standing wave is produced in the pipe, causing the pipe to sound a note.
Which of the following is a correct statement about the wave\u2019s properties at the closed end of the pipe?\n(A) The pressure is at a node, but the particle displacement is at an antinode. (B) The pressure is at an antinode, but the particle displacement is at a node. (C) The pressure and the particle displacement are both at nodes. (D) The pressure and the particle displacement are both at antinodes.\nA: Let's think step by step. At the closed end of the pipe, the particles cannot have any net displacement because the pipe closure stops them. So the particle displacement is at a node. This closure also causes the pressure to be maximal, i.e. an antinode. The answer is (B).", "high_school_psychology": "The following are multiple choice questions (with answers) about high school psychology.\n\nQ: Pascale is interested in the processing strategies children use to learn new information. Pascale would best be classified as what type of psychologist?\n(A) sociocultural (B) clinical (C) cognitive (D) behaviorist\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. A sociocultural psychologist focuses on the effect of societal factors on people. A clinical psychologist focuses on people with mental issues. A cognitive psychologist focuses on how people think and learn, including processing strategies. A behaviorist focuses more on the effects of environment and experience on people. The answer is (C).\n\nQ: According to Caplan's model of consultee-centered case consultation, the consultant is primarily interested in\n(A) identifying the causes and solutions of the client's presenting problems (B) identifying and eliminating the causes of the consultee's difficulties in handling a problem (C) establishing a hierarchy of authority to enable effective decision making (D) presenting a single, well-defined and unambiguous course of action for the consultant to overcome skills deficits\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Caplan defines two types of consultation. Client-centered case consultation aims to handle the client's problems, while consultee-centered case consultation aims to identify the reasons for the consultee's difficulties in handling a problem. The answer is (B).\n\nQ: According to the Individuals with Disabilities Education Improvement Act, which of the following must an educational agency do before it changes the educational placement of a student with a disability?\n(A) Give the child a trial period in the new environment (B) Notify the parents in writing (C) Obtain school board approval (D) Obtain parental consent\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. When the decision to change the educational placement of a student with a disability is made, the educational agency must notify the parents in writing before the change takes place. The answer is (B).\n\nQ: While swimming in the ocean, Ivan is frightened by a dark shadow in the water even before he has the chance to identify what the shadow is. The synaptic connections taking place during this incident of fright are best described by which of the following?\n(A) Messages are sent from the thalamus directly to the amygdala. (B) Messages are sent from the thalamus to the \"what\" and \"where\" pathways. (C) Messages are sent from the parasympathetic nervous system to the cerebral cortex. (D) Messages are sent from the frontal lobes to the pituitary gland.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help.
Our neural system has a mechanism that can respond to immediate emotional signals before they reach the thought center. In Ivan's case, messages travel directly from the thalamus to the amygdala. The answer is (A).\n\nQ: Ani believes that her attitudes and behavior play a central role in what happens to her. Such a belief is likely to be associated with\n(A) a strong superego. (B) low self-esteem. (C) low self-efficacy. (D) an internal locus of control.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. People with an external locus of control believe fate and luck play an important role in their lives, while people with an internal locus of control believe they control their lives. The answer is (D).", "high_school_statistics": "The following are multiple choice questions (with answers) about high school statistics.\n\nQ: A new smartwatch is manufactured in one part of a factory, then secured for shipping in another, independent part of the factory. The weight of the smartwatch has a mean of 62 grams and a standard deviation of 1.0 grams. The weight of the packaging (box, user's guide, bubble wrap, etc.) has a mean of 456 grams and a standard deviation of 6 grams. Together, the distribution of the weight of the smartwatch and its packaging would have the following mean and standard deviation:\n(A) Mean 518 grams; standard deviation 7.0 grams (B) Mean 518 grams; standard deviation 3.5 grams (C) Mean 518 grams; standard deviation 6.1 grams (D) Mean 394 grams; standard deviation 6.1 grams\nA: Let's think step by step. Since the weight of the watch and the weight of the packaging are independent random variables, the mean and variance of their sum are equal to the sum of their individual means and variances. So the mean is 62 + 456 = 518 grams, and the variance is 1.0^2 + 6.0^2 = 37, leading to a standard deviation of 6.1 grams. The answer is (C).\n\nQ: After a frost warning was issued, the owner of a large orange grove asked his workers to spray all his trees with water. The water was supposed to freeze and form a protective covering of ice around the orange blossom. Nevertheless, the owner suspected that some trees suffered considerable damage due to the frost. To estimate the proportion of trees that suffered more than 50 percent damage due to the frost, he took a random sample of 100 trees from his grove. What is the response variable in this experiment?\n(A) The proportion of trees that suffered more than 50 percent damage due to frost. (B) The number of trees affected by the frost. (C) The number of trees sampled from the grove. (D) For each sampled tree, whether it suffered more than 50 percent damage or at most 50 percent damage.\nA: Let's think step by step. In this experiment, the response variable is what is measured. For each tree, what is measured is whether or not it suffered more than 50 percent damage due to the frost. The answer is (D).\n\nQ: Suppose X and Y are random variables with E(X) = 37, var(X) = 5, E(Y) = 62, and var(Y) = 12. What are the expected value and variance of the random variable X + Y?\n(A) E(X + Y) = 99, var(X + Y) = 8.5 (B) E(X + Y) = 99, var(X + Y) = 13 (C) E(X + Y) = 99, var(X + Y) = 17 (D) There is insufficient information to answer this question.\nA: Let's think step by step.
While means of sums of random variables add (regardless of whether the variables are independent), to determine the variance of a sum of random variables we need to know not just their individual variances but also the covariance of the two variables, which is not given in this problem. The answer is (D).\n\nQ: Which of the following sets has the smallest standard deviation? Which has the largest?\nI: {1,2,3}\nII: {-10,10}\nIII: {100}\n(A) I, II (B) II, III (C) III, I (D) III, II\nA: Let's think step by step. The variance of distribution I is the expected squared deviation from its mean (which is 2), so the variance is 2/3. The variance of distribution II is 10^2 (because both elements are 10 away from the mean of zero). The variance of distribution III is 0, since it has a single entry. So distribution III has the smallest standard deviation and distribution II has the largest. The answer is (D).\n\nQ: Which of the following is a correct statement about correlation?\n(A) If the slope of the regression line is exactly 1, then the correlation is exactly 1. (B) If the correlation is 0, then the slope of the regression line is undefined. (C) Switching which variable is called x and which is called y changes the sign of the correlation. (D) The correlation r is equal to the slope of the regression line when z-scores for the y-variable are plotted against z-scores for the x-variable.\nA: Let's think step by step. Statement A is false because the slope of the regression line being exactly 1 can occur even when the two variables are not perfectly correlated. Statement B is false because when the correlation is 0, the regression line has slope 0, not an undefined slope. Statement C is false because correlation is symmetric in the two random variables. The answer is (D).", "high_school_us_history": "The following are multiple choice questions (with answers) about high school us history.\n\nQ: This question refers to the following information.\nI come not to urge personal claims, nor to seek individual benefits; I appear as the advocate of those who cannot plead their own cause; I come as the friend of those who are deserted, oppressed, and desolate. In the Providence of God, I am the voice of the maniac whose piercing cries from the dreary dungeons of your jails penetrate not your Halls of Legislation. I am the Hope of the poor crazed beings who pine in the cells, and stalls, and cages, and waste rooms of your poor-houses.
I am the Revelation of hundreds of wailing, suffering creatures, hidden in your private dwellings, and in pens and cabins\u2014shut out, cut off from all healing influences, from all mind-restoring cares.\u2026 Could their melancholy histories be spread before you as revealed to my grieved spirit during the last three months, how promptly, how earnestly would you search out the most approved means of relief; how trifling, how insignificant, by comparison, would appear the sacrifices you are asked to make; how would a few dimes and dollars, gathered from each citizen, diminish in value as a possession, compared with the certain benefits and vast good to be secured for the suffering insane...by the consecration and application of a sufficient fund to the construction of a suitable hospital.\u2026\n\u2014Dorothea Dix, Memorial Soliciting a State Hospital for the Protection and Cure of the Insane,\nSubmitted to the General Assembly of North Carolina, November 1848\nDorothea Dix can best be compared to whom?\n(A) Abigail Adams (B) Clara Barton (C) Shirley Temple (D) Hillary Clinton\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. Both Dorothea Dix and Clara Barton were American nurses. The answer is (B).\n\nQ: This question refers to the following information.\n\"As our late Conduct at the Conestoga Manor and Lancaster have occasioned much Speculation & a great diversity of Sentiments in this and neighboring Governments; some vindicating & others condemning it; some charitably alleviating the Crime, & others maliciously painting it in the most odious & detestable Colours, we think it our duty to lay before the Publick, the whole Matter as it appeared, & still appears, to us. . . .\n\"If these things are not sufficient to prove an unjustifiable Attachment in the Quakers to the Indians Savages, a fixed Resolution to befriend them & an utter insensibility to human Distresses, let us consider a few more recent Facts. When we found the last Summer that we were likely to get no Assistance from the Government, some Volunteers went out at our own Expense, determined to drive our Enemies from our Borders; & when we came near to the great Island, we understood that a Number of their Warriors had gone out against our Frontiers. Upon this we returned and came up with them and fought with them at the Munfey Hill where we lost some of our Men & killed some of their Warriors & thereby saved our Frontiers from this Story in another Expedition. But no sooner had we destroyed their Provisions on the great Island, & ruined their trade with the good People at Bethlehem, but these very Indians, who were justly suspected of having murdered our Friends in Northampton County, were by the Influence of some Quakers taken under the Protection of the Government to screen them from the Resentments of the Friends and Relations of the Murdered, & to support them thro the Winter.\"\n\u2014\"Apology of the Paxton Boys\" (pamphlet), 1764 (Note: \"apology\" in this context should be read as an explanation, not an admission of guilt or regret.)\nThe sentiments expressed in the explanation above reflect which of the ongoing tensions during the colonial period of American history?\n(A) Tensions between British policies and the aspirations of North American colonists. (B) Tensions between American Indians allied with the French and those allied with the British. (C) Tensions between freed African Americans and white planters.
(D) Tensions between backcountry settlers and elites within colonial America.\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. After the French and Indian War, the Scotch-Irish settlers attacked American Indians. After the attacks on the Conestoga, about 250 Paxton Boys presented their grievances to the Pennsylvania legislature. As mentioned in the information, the Paxton Boys cited resentment at local elites. The answer is (D).\n\nQ: This question refers to the following information.\nOur leaders talk about stopping aggression from the north, but this was a struggle among groups of Vietnamese until we intervened. We seem bent upon saving the Vietnamese from Ho Chi Minh even if we have to kill them and demolish their country to do it. As the native people survey bombed-out villages, women and children burned by napalm, rice crops destroyed and cities overrun with our military personnel, they are doubtless saying secretly of the Vietcong guerillas and of the American forces, \"A plague on both your houses.\" \u2026 Stop the bombing, north and south, end search and destroy offensive sweeps, and confine our military action to holding operations on the ground. Bombing the north has failed to halt or seriously check the flow of troops to the south and may, in fact, have prompted a much greater war effort by Hanoi.\n\u2014Senator George McGovern, \"The Lessons of Vietnam,\" April 25, 1967\nWhich of the following opinions from the 1960s most directly reflects the perspective of George McGovern's speech?\n(A) Americans must maximize their technological edge in Vietnam. (B) American bombing in Vietnam is step by step leading to progress in the war. (C) American bombing in Vietnam is a failure. (D) America must not give in to defeatism about the war in Vietnam.\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. \"Stop the bombing\" and \"Bombing the north has failed to halt or seriously check the flow of troops to the south\" indicate that the perspective of George McGovern's speech is that American bombing in Vietnam is a failure. The answer is (C).\n\nQ: This question refers to the following information.\n\"In the new Code of Laws which I suppose it will be necessary for you to make I desire you would Remember the Ladies, and be more generous and favorable to them than your ancestors. Do not put such unlimited power into the hands of the Husbands. Remember all Men would be tyrants if they could. If particular care and attention is not paid to the Ladies we are determined to foment a Rebellion, and will not hold ourselves bound by any Laws in which we have no voice, or Representation.\"\nAbigail Adams, in a letter to John Adams, 1776\n\"Special legislation for woman has placed us in a most anomalous position. Women invested with the rights of citizens in one section\u2014voters, jurors, office-holders\u2014crossing an imaginary line, are subjects in the next. In some States, a married woman may hold property and transact business in her own name; in others, her earnings belong to her husband. In some States, a woman may testify against her husband, sue and be sued in the courts; in others, she has no redress in case of damage to person, property, or character. In case of divorce on account of adultery in the husband, the innocent wife is held to possess no right to children or property, unless by special decree of the court.
But in no State of the Union has the wife the right to her own person, or to any part of the joint earnings of the co-partnership during the life of her husband. In some States women may enter the law schools and practice in the courts; in others they are forbidden. In some universities girls enjoy equal educational advantages with boys, while many of the proudest institutions in the land deny them admittance, though the sons of China, Japan and Africa are welcomed there. But the privileges already granted in the several States are by no means secure.\"\nSusan B. Anthony, \"Declaration of Rights for Women,\" July 4, 1876\nThe sentiments expressed in the second excerpt by Susan B. Anthony are most likely in support of\n(A) the Equal Rights Amendment (B) universal suffrage (C) states' rights (D) prohibition\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. The above information mentioned that women are in an anomalous position in terms of legislation. Women's earnings do not belong to them, and they cannot testify against their husbands. Susan B. Anthony believed women should have the same legal rights as men. The answer is (B).\n\nQ: This question refers to the following information.\n\"Society in every state is a blessing, but government even in its best state is but a necessary evil; in its worst state an intolerable one; for when we suffer, or are exposed to the same miseries by a government, which we might expect in a country without government, our calamity is heightened by reflecting that we furnish the means by which we suffer. Government, like dress, is the badge of lost innocence; the palaces of kings are built on the ruins of the bowers of paradise. For were the impulses of conscience clear, uniform, and irresistibly obeyed, man would need no other lawgiver; but that not being the case, he finds it necessary to surrender up a part of his property to furnish means for the protection of the rest; and this he is induced to do by the same prudence which in every other case advises him out of two evils to choose the least. Wherefore, security being the true design and end of government, it unanswerably follows that whatever form thereof appears most likely to ensure it to us, with the least expense and greatest benefit, is preferable to all others.\"\nThomas Paine, Common Sense, 1776\nWhich of the following \"miseries\" alluded to above were most condemned by Anti-Federalists of the post-Revolutionary era?\n(A) Organized response to Bacon's Rebellion (B) Federal response to Shays's Rebellion (C) Federal response to the Whiskey Rebellion (D) Federal response to Pontiac's Rebellion\nA: Let's think step by step. We refer to Wikipedia articles on us history for help. Anti-Federalists did not believe in centralized government power and were suspicious of Washington's military response to the Whiskey Rebellion. Bacon's Rebellion and Pontiac's Rebellion happened before the Revolution, so they can be ruled out. The answer is (C).", "high_school_world_history": "The following are multiple choice questions (with answers) about high school world history.\n\nQ: This question refers to the following information.\n\"At least one of the [world's] societies would have to somehow enormously increase its productivity [in order to achieve global hegemony]. That quantum jump would have to be made before the various scientific, technological, agricultural, and industrial revolutions on which our post-quantum-leap world rests.
It could only be accomplished by exploiting the ecosystems, mineral resources, and human assets of whole continents outside the lands of the society making the jump. Western Europe did just that by means of its brutality and guns and, more important, by geographical and ecological luck.\"\nCopyright \u00a9 2015 Cambridge University Press.\nAlfred Crosby, historian, Ecological Imperialism, 2004\nThe \"quantum jump\" mentioned in the passage most directly contributed to which of the following developments in the period 1450\u20131750 C.E.?\n(A) A breakdown in trade routes through the collapse of the established state structure (B) An increase in the population of the world through more plentiful supplies of food (C) The spread of Chinese and Indian belief systems across the world (D) An increase in social unrest\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The \"quantum jump\" mentioned in the passage refers to the conquest of the New World and the Columbian Exchange. Choices (A) and (C) did not happen in history. Choice (C) refers to the human assets. The answer is (B).\n\nQ: This question refers to the following information.\n\"The struggle against neo-colonialism is not aimed at excluding the capital of the developed world from operating in less developed countries. It is aimed at preventing the financial power of the developed countries being used in such a way as to impoverish the less developed.\nNon-alignment, as practiced by Ghana and many other countries, is based on co-operation with all States whether they be capitalist, socialist or have a mixed economy. Such a policy, therefore, involves foreign investment from capitalist countries, but it must be invested in accordance with a national plan drawn up by the government of the non-aligned State with its own interests in mind. The issue is not what return the foreign investor receives on his investments\u2026The question is one of power. A State in the grip of neo-colonialism is not master of its own destiny.\"\nKwame Nkrumah, Neo-Colonialism, 1965\nWhich of the following provides the best context for Nkrumah's writings?\n(A) The Industrial Revolution (B) Decolonization (C) Regional Free Trade Associations (D) Autarky\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage expresses the concern that the fight against neo-colonialism was in danger, and that newly independent nations like Ghana might be re-colonized via the financial power of the developed countries. The answer is (B).\n\nQ: This question refers to the following information.\n\"Indeed, as both the fatwas of distinguished [scholars] who base their opinion on reason and tradition alike and the consensus of the Sunni community agree that the ancient obligation of extirpation, extermination, and expulsion of evil innovation must be the aim of our exalted aspiration, for \"Religious zeal is a victory for the Faith of God the Beneficent\"; then, in accordance with the words of the Prophet (Peace upon him!)
\"Whosoever introduces evil innovation into our order must be expelled\" and \"Whosoever does aught against our order must be expelled,\" action has become necessary and exigent\u2026\"\nLetter from Ottoman Sultan Selim I to Safavid Shah Ismail I, 1514\nThe letter from Selim I is most clearly an example of which of the following?\n(A) The maintenance of military supremacy at all costs (B) Expanding tensions between religious sects (C) Factors that brought about the collapse of the Ottoman Empire (D) Peacemaking efforts among the Islamic empires\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage is an example of expanding tensions between Selim and Ismail. In the passage the Selim references the fatwa and the consensus of the Sunni community to against whosoever introduces evil. The answer is (B).\n\nQ: This question refers to the following information.\n\"The real grievance of the worker is the insecurity of his existence; he is not sure that he will always have work, he is not sure that he will always be healthy, and he foresees that he will one day be old and unfit to work. If he falls into poverty, even if only through a prolonged illness, he is then completely helpless, exam_ins to his own devices, and society does not currently recognize any real obligation towards him beyond the usual help for the poor, even if he has been working all the time ever so faithfully and diligently. The usual help for the poor, however, leaves a lot to be desired, especially in large cities, where it is very much worse than in the country.\"\nOtto von Bismarck, 1884\nOtto von Bismarck likely made this speech in reaction to which of the following issues?\n(A) Social acceptance of child labor (B) Declining life expectancy in Germany (C) Criticisms of German trade tariffs (D) Negative effects attributed to industrial capitalism\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. The passage talks about the grievance of the work under the industrial capitalism. The answer is (D).\n\nQ: This question refers to the following information.\nHe contains all works and desires and all perfumes and all tastes. He enfolds the whole universe and in silence is loving to all. This is the Spirit that is in my heart, this is Brahman. To him I shall come when I go beyond this life, and to him will come he who has faith and doubts not.\n\u2014The Upanishads, India, c. 1000 BCE\nTo which religion does the speaker most likely belong?\n(A) Hinduism (B) Buddhism (C) Shintoism (D) Zoroastrianism\nA: Let's think step by step. We refer to Wikipedia articles on world history for help. Brahman refers to the ultimate reality of all things in the Hindu religion. In contrast, Buddhism does not have a concept of supreme God. The answer is (A).", "human_aging": "The following are multiple choice questions (with answers) about human aging.\n\nQ: All other things being equal, which of the following persons is more likely to show osteoporosis?\n(A) An older Hispanic American woman (B) An older African American woman (C) An older Asian American woman (D) An older Native American woman\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. Although osteoporosis can occur at any age, the risk is higher for older people. It is most common in Asian and non-Hispanic white women. 
The answer is (C).\n\nQ: The finding that adults tend to remember events from their adolescence better than from other periods in their lives is referred to as the\n(A) Adolescence advantage (B) Reminiscence bump (C) Memorial memorial (D) Quadratic retrieval spike\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. The reminiscence bump is the phenomenon that older adults tend to recollect events from their youth better than from other periods. People usually have a period of childhood amnesia from birth to around age 5, and a reminiscence bump between 10 and 30. The answer is (B).\n\nQ: Which element in tobacco smoke is responsible for cancers?\n(A) Nicotine (B) Tar (C) Carbon monoxide (D) Smoke particles\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. The benzene, acrylamide and acrylonitrile in tar interact with the lungs, cause DNA mutations in lung cells, and lead to cancer. The answer is (B).\n\nQ: When older adults move to a new state after retirement, which of the following is the more likely destination?\n(A) Texas (B) California (C) Hawaii (D) Vermont\nA: Let's think step by step. We refer to Wikipedia articles on human aging for help. Texas does not have a state income tax and has a low cost of living compared with the other three options. The answer is (A).", "human_sexuality": "The following are multiple choice questions (with answers) about human sexuality.\n\nQ: The most common disorder among men who seek sexual therapy is:\n(A) premature ejaculation (B) inhibited ejaculation (C) erectile disorder (D) ejaculatory disorder\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. The most common type of arousal disorder for men is erectile dysfunction, meaning that a person is unable to attain or maintain an erection although he may be interested in sexual activity. The answer is (C).\n\nQ: A woman who knows she has active herpes and untreated syphilis but continues to have sex without informing her partners of her condition has, in psychoanalytic terms:\n(A) a strong ego (B) a weak superego (C) a weak id (D) a strong superego\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. A person with a weak superego tends to be delinquent, criminal, or antisocial. The actions of the woman, who knows she has active venereal disease but still has sex with her partners, indicate she may have an antisocial personality. The answer is (B).\n\nQ: The nature of homosexual activities that occur during preadolescence include all but which of the following?\n(A) sexual intercourse (B) circle jerks (C) exhibitionism (D) touching each other's genitals\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. Sexual intercourse does not typically occur during preadolescence. The answer is (A).\n\nQ: Women's ability to have multiple orgasms is primarily due to:\n(A) the fact that they do not have a refractory period. (B) the response of the inner layers of the vagina. (C) having alternating orgasms in different locations. (D) the G-Spot.\nA: Let's think step by step. We refer to Wikipedia articles on human sexuality for help. The refractory period is the time after orgasm during which a person is not able to become aroused again; women typically do not have one. The answer is (A).\n\nQ: Morning sickness is typically a problem:\n(A) during the first trimester (B) during the second trimester (C) during the third trimester (D) all through the pregnancy\nA: Let's think step by step.
We refer to Wikipedia articles on human sexuality for help. Morning sickness usually begins by nine weeks after conception, corresponding to the first trimester. The answer is (A).", "international_law": "The following are multiple choice questions (with answers) about international law.\n\nQ: How the consent to be bound of a State may be expressed?\n(A) The consent of a State to be bound is expressed only by ratification (B) The consent of a state to be bound by a treaty may be expressed by signature, ratification, acceptance, approval or accession (C) The consent of a State to be bound is expressed by signature (D) The consent of a State to be bound is expressed by whatever means they choose\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Article 11 of Vienna Convention on the Law of Treaties signed in 1969 states that \"the consent of a State to be bound by a treaty may be expressed by signature, exchange of instruments constituting a treaty, ratification, acceptance, approval or accession, or by any other means if so agreed.\" (B) is the most precise and accurate answer. The answer is (B).\n\nQ: What is the judge ad hoc?\n(A) If a party to a contentious case before the ICJ does not have a national sitting as judge, it is entitled to nominate someone as a judge solely for that case, with the title of judge ad hoc (B) Judge ad hoc is the member of the bench of the ICJ with a casting vote (C) Judge ad hoc is a surrogate judge, in case a judge is disqualified or passes away (D) Judge ad hoc is the judge that each party will always nominate in every contentious case\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. As \"ad hoc\" implies, a judge ad hoc is appointed only for a specific case or period, when a party to a contentious case before the International Court of Justice does not have a regular national sitting as judge. The answer is (A).\n\nQ: When 'consent' can serve as a circumstance precluding the wrongfulness of a State conduct?\n(A) Consent can serve as a circumstance precluding the wrongfulness whenever it is given (B) Consent can never serve as a circumstance precluding wrongfulness (C) Consent can serve as a circumstance precluding wrongfulness, provided the consent is valid and to the extent that the conduct remains within the limits of the consent given (D) Consent can always serve as a circumstance precluding wrongfulness, no matter which organ of the State gives it\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Valid consent can serve as a circumstance precluding the wrongfulness of a State conduct if the conduct remains within the limits of that consent, according to Chapter V of the Responsibility of States for Internationally Wrongful Acts, 2001, United Nations. The answer is (C).\n\nQ: Would a reservation to the definition of torture in the ICCPR be acceptable in contemporary practice?\n(A) This is an acceptable reservation if the reserving country's legislation employs a different definition (B) This is an unacceptable reservation because it contravenes the object and purpose of the ICCPR (C) This is an unacceptable reservation because the definition of torture in the ICCPR is consistent with customary international law (D) This is an acceptable reservation because under general international law States have the right to enter reservations to treaties\nA: Let's think step by step. 
We refer to Wikipedia articles on international law for help. Because it contravenes the object and purpose of the ICCPR, this is an unacceptable reservation in contemporary practice. The answer is (B).\n\nQ: What types of force does Article 2(4) of the UN Charter prohibit?\n(A) Article 2(4) encompasses only armed force (B) Article 2(4) encompasses all types of force, including sanctions (C) Article 2(4) encompasses all interference in the domestic affairs of States (D) Article 2(4) encompasses force directed only against a State's territorial integrity\nA: Let's think step by step. We refer to Wikipedia articles on international law for help. Article 2(4) of the UN Charter prohibits states from using armed force in their international relations. The answer is (A).", "jurisprudence": "The following are multiple choice questions (with answers) about jurisprudence.\n\nQ: Iverson Jewelers wrote a letter to Miller, 'We have received an exceptionally fine self winding Rolox watch which we will sell to you at a very favorable price.'\n(A) The letter is an offer to sell (B) A valid offer cannot be made by letter. (C) The letter contains a valid offer which will terminate within a reasonable time. (D) The letter lacks one of the essential elements of an offer.\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. An offer shows the intent to enter into a mutually-beneficial contract with specific terms. An offer can be made by a letter. While this letter indicates the willingness to sell, the lack of specific terms, such as transaction price and offer expiration date, makes it an incomplete offer. The answer is (D).\n\nQ: Functions of the law include all but which of the following?\n(A) maximizing individual freedom (B) providing a basis for compromise (C) keeping the peace (D) promoting the principles of the free enterprise system\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Laws are fundamentally about helping resolve disputes between individuals, and therefore essential for maximizing individual freedom, providing a basis for compromise, and keeping the peace. The answer is (D).\n\nQ: The ________ School of jurisprudence postulates that the law is based on what is \"correct.\"\n(A) Natural Law (B) Analytical (C) Historical (D) Sociological\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Natural Law School of jurisprudence focuses on the laws of nature, and states that the law should be based on ethics, morals, and what is \"correct\". Analytical deals with the law as it already exists, Historical postulates that the law was found and not made, and Sociological studies how the law and society impact each other. The answer is (A).\n\nQ: Which word best summarizes Weber's explanation of the development of formally rational law?\n(A) Authority. (B) Charisma. (C) Co-operation. (D) Capitalism.\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. Weber explained the development of formal rationality in laws as how the modern society moved from tradition to rationality, where people decide actions based less on how they were culturally done and more on expected utilities. How rational individuals optimize efficiency of accomplishing tasks for higher rewards is a core principle of Capitalism.
The answer is (D).\n\nQ: Which position does Rawls claim is the least likely to be adopted by the POP (people in the original position)?\n(A) The POP would choose equality above liberty. (B) The POP would opt for the 'maximin' strategy. (C) The POP would opt for the 'difference principle'. (D) The POP would reject the 'system of natural liberty.'\nA: Let's think step by step. We refer to Wikipedia articles on jurisprudence for help. The POP would opt for the 'maximin' strategy, opt for the 'difference principle', and reject the 'system of natural liberty', but the POP would not choose equality above liberty, since the POP assumes citizens who are both equal and free. The answer is (A).", "logical_fallacies": "The following are multiple choice questions (with answers) about logical fallacies.\n\nQ: When an arguer causes confusion during refutation because of real or feigned lack of an ability to engage in refutation, that arguer may have committed the fallacy of\n(A) poor sportsmanship (B) appeal to compassion (C) argument against the person (D) ignorance of refutation\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. Ignorance of refutation, one of Aristotle's original list of logical fallacies in his Organon, is when someone causes confusion in an argument through real or feigned inability to engage in refutation, in order to win the argument. The answer is (D).\n\nQ: The complex question fallacy consists of\n(A) arguing something is inferior just because it doesn't do something it was never intended to do. (B) including more than one claim in the proposition and treating proof for one claim as proof for all the claims. (C) drawing a conclusion before examining the evidence, and only considering evidence that supports that conclusion. (D) asking a question that includes either an unproven assumption or more than one question, thus making a straightforward yes or no answer meaningless.\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The complex question fallacy is when someone makes a single yes or no answer to a question meaningless, by including either an unproven assumption or many questions. The latter is also known as the many questions fallacy. The answer is (D).\n\nQ: Arguing that what is true of the parts must be true of the whole is the fallacy of...\n(A) Division (B) Composition (C) Appeal to the person (D) Appeal to ignorance\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. Fallacy of composition occurs when someone argues what is true of the parts must be true of the whole. The answer is (B).\n\nQ: Which of the following is true of a valid categorical syllogism?\n(A) The minor premise must deny the antecedent (B) The major premise must affirm the consequent (C) The middle term must be used in at least one premise in a universal or unqualified sense (D) All of the above\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. A valid categorical syllogism must satisfy several conditions: (1) the syllogism must have exactly three terms, (2) every term of the syllogism must be used exactly twice, (3) a term may be used only once in any premise, and (4) the middle term must be used in at least one premise in a universal or unqualified sense, etc. Only (C) is true.
The answer is (C).\n\nQ: If someone attacks the character of an opposing arguer, instead of responding to that opponent's arguments, the first person has probably committed which of the following fallacies?\n(A) tu quoque (B) horse laugh (C) argument against the person (D) ignoratio elenchi\nA: Let's think step by step. We refer to Wikipedia articles on logical fallacies for help. The argument against the person fallacy occurs when someone irrelevantly attacks the character of an opposing arguer, instead of addressing that opponent's arguments. The answer is (C).", "machine_learning": "The following are multiple choice questions (with answers) about machine learning.\n\nQ: Which image data augmentation is most common for natural images?\n(A) random crop and horizontal flip (B) random crop and vertical flip (C) posterization (D) dithering\nA: Let's think step by step. Data augmentation is used to increase the diversity of images in the training dataset. It is important that natural images are kept natural after being augmented. Vertical flips of images are not natural, so (B) is false. Posterization makes the image look like a poster, and dithering simulates greater color depth. Neither of these preserves the natural property. The only natural data augmentation technique is (A). The answer is (A).\n\nQ: Traditionally, when we have a real-valued input attribute during decision-tree learning we consider a binary split according to whether the attribute is above or below some threshold. Pat suggests that instead we should just have a multiway split with one branch for each of the distinct values of the attribute. From the list below choose the single biggest problem with Pat\u2019s suggestion:\n(A) It is too computationally expensive. (B) It would probably result in a decision tree that scores badly on the training set and a testset. (C) It would probably result in a decision tree that scores well on the training set but badly on a testset. (D) It would probably result in a decision tree that scores well on a testset but badly on a training set.\nA: Let's think step by step. Because the input is real valued, it is unlikely that the same values appear both at training and test time. This means that while such a decision tree could yield good performance on the training data, when evaluated on the test data it will perform badly because the decision tree won\u2019t know what to do with numbers that did not appear in the training data. The answer is (C).\n\nQ: You are reviewing papers for the World\u2019s Fanciest Machine Learning Conference, and you see submissions with the following claims. Which ones would you consider accepting?\n(A) My method achieves a training error lower than all previous methods! (B) My method achieves a test error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise test error.) (C) My method achieves a test error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise cross-validation error.) (D) My method achieves a cross-validation error lower than all previous methods! (Footnote: When regularisation parameter \u03bb is chosen so as to minimise cross-validation error.)\nA: Let's think step by step. In machine learning, we train with some data and fixed hyperparameters and the training error can be arbitrarily low, so (A) can\u2019t be right. 
Then, one compares different hyperparameters by selecting the model with the lowest cross-validation error; this means that (B) and (D) are not the right procedure. The only relevant number after these is the test error, and thus (C) is the right answer. The answer is (C).\n\nQ: A 6-sided die is rolled 15 times and the results are: side 1 comes up 0 times; side 2: 1 time; side 3: 2 times; side 4: 3 times; side 5: 4 times; side 6: 5 times. Based on these results, what is the probability of side 3 coming up when using Add-1 Smoothing?\n(A) 2.0/15 (B) 1.0/7 (C) 3.0/16 (D) 1.0/5\nA: Let's think step by step. Add-1 smoothing adds the value of one to the different counts and then normalizes the probabilities accordingly. The counts after adding one will be: side 1 comes up 1 time; side 2: 2 times; side 3: 3 times; side 4: 4 times; side 5: 5 times; side 6: 6 times. The total count after smoothing is 21, so the probability of drawing a three is 3/21 = 1/7. The answer is (B).\n\nQ: To achieve a 0/1 loss estimate that is less than 1 percent of the true 0/1 loss (with probability 95%), according to Hoeffding's inequality the IID test set must have how many examples?\n(A) around 10 examples (B) around 100 examples (C) between 100 and 500 examples (D) more than 1000 examples\nA: Let's think step by step. By Hoeffding\u2019s inequality, we expect that with 95% probability the in-sample and out-of-sample errors differ by at most epsilon when we have N samples if 2 exp(-2 epsilon^2 N) < 0.05; this implies that N > -1/(2 epsilon^2) log(0.05/2) = 5000 log(40). Since log(40) > 1, one needs more than 1000 examples. The answer is (D).", "management": "The following are multiple choice questions (with answers) about management.\n\nQ: How can organisational structures that are characterised by democratic and inclusive styles of management be described?\n(A) Hierarchical (B) Bureaucratic (C) Flat (D) Functional\nA: Let's think step by step. We refer to Wikipedia articles on management for help. Flat organizational structures are characterized by democratic and inclusive styles of management, and have few (if any) levels of management between the workers and managers. The answer is (C).\n\nQ: Hygiene factors are associated with which writer?\n(A) Frederick Hertzberg (B) D.C. McClelland (C) Abraham Maslow (D) Douglas McGregor\nA: Let's think step by step. We refer to Wikipedia articles on management for help. Hygiene factors include compensation, company policies, supervision, interpersonal relations, and work environments. Hertzberg lists them as factors that cannot motivate employees but can minimize job dissatisfaction. The answer is (A).\n\nQ: What characteristic is not a key feature of the 'open systems' model of management?\n(A) Morale (B) Innovation (C) Growth resource (D) Adaptation\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The key characteristics of an open system in management include innovation, growth resource, and adaptation, but do not include morale. The answer is (A).\n\nQ: Which element of the cultural web forms regalia?\n(A) Symbols (B) Rituals and routines (C) Power structures (D) Control systems\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The cultural web is a tool for mapping an organization's culture, where symbols form the regalia that visually express the values that the organization holds as important. 
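The add-1 smoothing and Hoeffding's-inequality arithmetic in the machine-learning answers above can be sanity-checked with a short Python sketch; this is a minimal illustration, not part of the prompt data, and the variable names are ours:

import math

# Add-1 (Laplace) smoothing for the die-roll example: raw counts for sides 1-6.
counts = [0, 1, 2, 3, 4, 5]
smoothed = [c + 1 for c in counts]  # add one to every count
total = sum(smoothed)               # 15 rolls + 6 pseudo-counts = 21
p_side3 = smoothed[2] / total       # 3/21 = 1/7, matching answer (B)
assert math.isclose(p_side3, 1 / 7)

# Hoeffding bound for the 0/1-loss example: require 2*exp(-2*eps^2*N) < 0.05.
eps = 0.01
n_required = math.log(2 / 0.05) / (2 * eps ** 2)  # = 5000*ln(40), about 18444
assert n_required > 1000  # hence 'more than 1000 examples', answer (D)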
The answer is (A).\n\nQ: What are the two main dimensions of the Ohio Studies into leadership?\n(A) Starting position and end position (B) Initial environment and changed environment (C) Organisational structure and conditioning (D) Initiating structure and considerations\nA: Let's think step by step. We refer to Wikipedia articles on management for help. The Ohio State Leadership Studies conducted in the 1940s identified initiating structure and consideration as the two main dimensions of leader behavior. The answer is (D).", "marketing": "The following are multiple choice questions (with answers) about marketing.\n\nQ: Although the content and quality can be as controlled as direct mail, response rates of this medium are lower because of the lack of a personal address mechanism. This media format is known as:\n(A) Care lines. (B) Direct mail. (C) Inserts. (D) Door to door.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Door to door marketing delivers non-addressed items to all buildings within a geographic area. While it can control content and quality as well as direct mail marketing can, its response rate is lower because of the lack of a personal address mechanism. The answer is (D).\n\nQ: In an organization, the group of people tasked with buying decisions is referred to as the _______________.\n(A) Outsourcing unit. (B) Procurement centre. (C) Chief executive unit. (D) Decision-making unit.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. In an organization, the group of people tasked with buying decisions is referred to as the decision-making unit. The answer is (D).\n\nQ: The single group within society that is most vulnerable to reference group influence is:\n(A) The older consumer who feels somewhat left out of things. (B) The married women, many of whom feel a need for stability in their lives. (C) New immigrants who really want to assimilate into their new culture. (D) Children, who base most of their buying decisions on outside influences.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Children, who base most of their buying decisions on outside influences, are the single group within society that is most vulnerable to reference group influence. The answer is (D).\n\nQ: Which of the following is an assumption in Maslow's hierarchy of needs?\n(A) Needs are dependent on culture and also on social class. (B) Lower-level needs must be at least partially satisfied before higher needs can affect behaviour. (C) Needs are not prioritized or arranged in any particular order. (D) Satisfied needs are motivators, and new needs emerge when current needs remain unmet.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Maslow's hierarchy of needs, from the bottom upwards, comprises physiological (food and clothing), safety, love and belonging, esteem, and self-actualization needs. Lower-level needs must be at least partially satisfied before higher ones can affect behavior. The answer is (B).\n\nQ: _____________ is a natural outcome when combining demographic and geographic variables.\n(A) Geodemographics (B) Product differentiation. (C) ANSOFF matrix. (D) Brand management.\nA: Let's think step by step. We refer to Wikipedia articles on marketing for help. Geodemographics is a natural outcome when combining demographic and geographic variables. 
The answer is (A).", "medical_genetics": "The following are multiple choice questions (with answers) about medical genetics.\n\nQ: The stage of meiosis in which chromosomes pair and cross over is:\n(A) prophase I (B) metaphase I (C) prophase II (D) metaphase II\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Prophase I is the stage of meiosis where homologous chromosomes pair with each other and exchange genetic material. The answer is (A).\n\nQ: DNA ligase is\n(A) an enzyme that joins fragments in normal DNA replication (B) an enzyme of bacterial origin which cuts DNA at defined base sequences (C) an enzyme that facilitates transcription of specific genes (D) an enzyme which limits the level to which a particular nutrient reaches\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. DNA ligase is a type of enzyme (EC 6.5.1.1) responsible for joining DNA strands together by catalyzing a phosphodiester bond. The answer is (A).\n\nQ: Which of the following conditions does not show multifactorial inheritance?\n(A) Pyloric stenosis (B) Schizophrenia (C) Spina bifida (neural tube defects) (D) Marfan syndrome\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Multifactorial inheritance is when more than a single factor is responsible for causing a given trait or health problem. Genes cannot be the only factor. Marfan syndrome, on the other hand, requires only one abnormal copy of the of the Marfan gene, from one parent, to inherit the trait. The answer is (D).\n\nQ: A gene showing codominance\n(A) has both alleles independently expressed in the heterozygote (B) has one allele dominant to the other (C) has alleles tightly linked on the same chromosome (D) has alleles expressed at the same time in development\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. Codominance, as it relates to genetics, refers to a type of genetic inheritance where the phenotype of both the parents is easily observed in the offspring. A heterozygote is an individual having two different alleles of a gene. The answer is (A).\n\nQ: Large triplet repeat expansions can be detected by:\n(A) polymerase chain reaction. (B) single strand conformational polymorphism analysis. (C) Southern blotting. (D) Western blotting.\nA: Let's think step by step. We refer to Wikipedia articles on medical genetics for help. A Southern blot is a method in molecular biology for detecting specific DNA sequences in a sample. Large triplet repeat expansions are usually detected with this method. The answer is (C).", "miscellaneous": "The following are multiple choice questions (with answers) about miscellaneous.\n\nQ: Which of these songs was a Top 10 hit for the rock band The Police?\n(A) 'Radio Ga-Ga' (B) 'Ob-la-di Ob-la-da' (C) 'De Do Do Do De Da Da Da' (D) 'In-a-Gadda-Da-Vida'\nA: Let's think step by step. We refer to Wikipedia for help. Radio Ga-Ga is by Queen. Ob-la-di Ob-la-da is by The Beatles. And In-a-Gadda-Da-Vida is by Iron Butterfly. Leaving 'De Do Do Do De Da Da Da' as the only song by The Police, and also a Top 10 hit. The answer is (C).\n\nQ: What place is named in the title of the 1979 live album by rock legends Cheap Trick?\n(A) Budapest (B) Budokan (C) Bhutan (D) Britain\nA: Let's think step by step. We refer to Wikipedia for help. Nippon Budokan is an indoor arena in Tokyo, Japan renowned for hosting rock music concerts including Cheap Trick in 1978. 
'Cheap Trick at Budokan' became the name of their album. The answer is (B).\n\nQ: What is produced during photosynthesis?\n(A) hydrogen (B) nylon (C) oxygen (D) light\nA: Let's think step by step. We refer to Wikipedia for help. Photosynthesis is the process in which green plants use the green pigment chlorophyll to synthesize foods from water and carbon dioxide. Oxygen is a byproduct of this process. The answer is (C).\n\nQ: Who is the shortest man to ever win an NBA slam dunk competition?\n(A) Anthony 'Spud' Webb (B) Michael 'Air' Jordan (C) Tyrone 'Muggsy' Bogues (D) Julius 'Dr J' Erving\nA: Let's think step by step. We refer to Wikipedia for help. In 1986, Spud Webb, standing only 5'7\", became the shortest NBA player in history to win an official slam dunk contest. The answer is (A).\n\nQ: How many axles does a standard automobile have?\n(A) one (B) two (C) four (D) eight\nA: Let's think step by step. We refer to Wikipedia for help. Most cars have two axles to rotate the wheels. The answer is (B).", "moral_disputes": "The following are multiple choice questions (with answers) about moral disputes.\n\nQ: Baron admits that the versions of the ticking bomb hypothetical she discusses are \"stunningly stupid,\" but she claims this is actually evidence of\n(A) the stupidity of most traditional philosophical examples. (B) a general lack of intelligence among people with advanced degrees. (C) the wrongness of torture. (D) the readiness on the part of many intelligent people to see torture as the best solution to deal with terrorism.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. The ticking bomb hypothetical poses a problem where many people will die from an exploding bomb if the hypothetical terrorist does not disclose how to defuse it. Baron sees this hypothetical as silly, but its prevalence does suggest that intelligent people, particularly utilitarians, see torture as justifiable to save the lives at stake in this scenario. The answer is (D).\n\nQ: A fertilized ovum is also known as\n(A) a zygote. (B) an embryo. (C) a viability. (D) a blastocyst.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. Once a single sperm penetrates the layers of an egg to form a new cell, that cell is called a zygote. The answer is (A).\n\nQ: Pence compares six different cases of reproduction, from natural twinning to SCNT. What conclusion does he draw from this comparison?\n(A) SCNT is not a different kind of reproduction because there are no morally relevant differences between it and other permissible means of reproduction. (B) Because there is a low risk of harm for natural twinning, there will be a low risk of harm for SCNT. (C) both A and B (D) neither A nor B\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. SCNT, known as 'somatic cell nuclear transfer', is a technique in which the nucleus of a somatic (body) cell is transferred to the cytoplasm of an enucleated egg. This is used in farm animal cloning. Pence compares this to other processes of reproduction and sees little moral separation. The answer is (A).\n\nQ: Which of the following is an intrinsic good, according to Singer?\n(A) being in the upper class (B) the ability to learn and solve problems (C) being more beautiful than most of the population (D) being shorter than average\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. 
Bioethicist Peter Singer takes intrinsic goods to be things that are valuable in themselves. Attributes such as class, beauty, or height may be valued for other reasons, but the capability to learn and solve problems is intrinsically good. The answer is (B).\n\nQ: According to Metz, what is wrong with consequentialist arguments against capital punishment based on African values?\n(A) It is unclear as of yet whether or not capital punishment deters harm to the community. (B) It is unclear as of yet whether or not capital punishment deters harm to any individuals. (C) Consequentialism is not supported by African values. (D) Even though consequentialism is supported by African values, no consequentialist arguments framed in terms of African values have been offered.\nA: Let's think step by step. We refer to Wikipedia articles on moral disputes for help. Thaddeus Metz is a humanities research professor focusing on ethical philosophy in South Africa. Metz has written that the death penalty is unjustified because it treats individuals as incapable of communal relations. It is unclear whether capital punishment benefits, or deters harm to, the community. The answer is (A).", "moral_scenarios": "The following are multiple choice questions (with answers) about moral scenarios.\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I took his wallet after he harassed me at the bar. Scenario 2 | I made love to my newborn baby boy.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Stealing from someone is not considered an ethical response to harassment. And having sex with a minor is egregiously unethical. The answer is (A).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I asked for a cigarette from the guy outside the store. Scenario 2 | When the security guard left his post, I pocketed a rare coin.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Asking for something is not morally wrong. Stealing is morally wrong. The answer is (C).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | During the house's demolition, I pushed him into a wall. Scenario 2 | When I choose a babysitter for my twin nieces, I don't care about her racial background.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Physical violence is morally wrong. Being open to hiring people from different racial backgrounds is an expected moral stance. The answer is (B).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I bought an old pick-up truck for work. 
Scenario 2 | I looked up at the gorgeous waterfall and threw a rock at it.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Neither of these is wrong, as no one is wronged by buying a pick-up truck or throwing a rock at a waterfall. The answer is (D).\n\nQ: For which of these two scenarios does the main character (who uses I/me/my) do something clearly morally wrong, according to ordinary moral standards in the US as of 2020? Scenario 1 | I really loved my newborn baby boy. Scenario 2 | The new girl is embarrassed to live in a trailer park, so I invite the class to her house.\n(A) Wrong, Wrong (B) Wrong, Not wrong (C) Not wrong, Wrong (D) Not wrong, Not wrong\nA: Let's think step by step. We refer to Wikipedia articles on moral scenarios for help. Loving someone is not wrong. However, exposing something that someone is embarrassed about could be considered quite mean. The answer is (C).", "nutrition": "The following are multiple choice questions (with answers) about nutrition.\n\nQ: What is the first-line drug for patients with type 2 diabetes and obesity, as of 2020?\n(A) Acarbose (B) Metformin (C) Sulphonylureas (D) Insulin\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. Metformin (Fortamet, Glumetza, or others) is usually the first medication prescribed for type 2 diabetes, as well as obesity. It works by lowering glucose production in the liver and improving the body's sensitivity to insulin. The answer is (B).\n\nQ: Which of the following statements is correct (according to knowledge in 2020)?\n(A) Consumers with phenylketonuria must avoid the consumption of the sweetener aspartame (B) Consumers with phenylketonuria must avoid the consumption of the sweetener saccharin (C) Consumers with phenylketonuria must avoid the consumption of the sweetener sucralose (D) Consumers with phenylketonuria must avoid the consumption of the sweetener acesulfame K\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. People with phenylketonuria (PKU) cannot break down the amino acid phenylalanine. As it builds up in the blood and brain it can lead to brain damage. People with PKU should avoid foods that are converted to phenylalanine in the body, such as aspartame. The answer is (A).\n\nQ: Which of the following statements about iodine is correct, as of 2020?\n(A) 50% of adults consume iodine at levels below the RNI (B) Dairy products are a poor source of iodine (C) The iodine content of organic milk is generally lower than the level in non-organic milk (D) UK dietary reference values recommend an increase in iodine intake in pregnancy\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. Organic milk usually has a lower iodine content than non-organic milk. The answer is (C).\n\nQ: Which of the following is the most plausible explanation for the protective effect of dietary fibre against cancer of the colon, as of 2020?\n(A) Propionic acid, formed during colonic fibre fermentation inhibits liver fatty acid synthesis (B) Butyric acid, formed during colonic fibre fermentation stimulates \"silencing\" of the SLC5A8 tumour suppressor gene (C) None of these options are correct (D) Butyric acid, formed during colonic fibre fermentation stimulates anti-oxidant defences in the colon\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. 
Dietary fibre intake is inversely related to the risk of colorectal cancer. This is presumed to be because butyric acid, formed during colonic fibre fermentation, stimulates antioxidant defences which help protect the colon from cancerous tumours. The answer is (D).\n\nQ: In a cohort study, the risk ratio of developing diabetes was 0.86 when comparing consumers of tea (the exposed) to those who did not drink tea (the unexposed). Which one statement is correct (according to knowledge in 2020)?\n(A) The tea drinkers have lower risk of developing diabetes. (B) The tea drinkers have higher risk of developing diabetes. (C) Based on the information given we cannot tell if the observed difference in disease risk is the result of chance. (D) The risk ratio is close to the value one, so there is no difference in disease risk between the two groups.\nA: Let's think step by step. We refer to Wikipedia articles on nutrition for help. The risk ratio is not so far below one that the difference could not be explained by random chance, given the study's sample size. The answer is (C).", "philosophy": "The following are multiple choice questions (with answers) about philosophy.\n\nQ: The study of reality in the broadest sense, an inquiry into the elemental nature of the universe and the things in it, is known as _____.\n(A) metaphysics (B) epistemology (C) quantum physics (D) axiology\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Among the options, only metaphysics studies the nature of reality and existence. The answer is (A).\n\nQ: According to Moore\u2019s \u201cideal utilitarianism,\u201d the right action is the one that brings about the greatest amount of:\n(A) pleasure. (B) happiness. (C) good. (D) virtue.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Moore's \"ideal utilitarianism\" states that one's actions should maximize intrinsic goods. The answer is (C).\n\nQ: Before Tolstoy's Christian conversion, what was his perspective on the meaning of life?\n(A) optimist (B) satisfied (C) nominally religious (D) pessimist\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Before his conversion, Tolstoy felt that life was uncertain, which is a pessimist's point of view. The answer is (D).\n\nQ: According to d'Holbach, people always act according to _____.\n(A) free choices (B) dictates of the soul (C) necessary natural laws (D) undetermined will\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. d'Holbach believed that people always act according to necessary natural laws, not free choices. The answer is (C).\n\nQ: Psychological egoism is:\n(A) an ethical theory about how we ought to behave. (B) a generalization concerning the way people tend to behave. (C) a claim about human nature and the ways people are capable of behaving. (D) none of the above.\nA: Let's think step by step. We refer to Wikipedia articles on philosophy for help. Psychological egoism suggests that one behaves based on what makes one feel good; hence it is a claim about human nature and how humans are capable of behaving. The answer is (C).", "prehistory": "The following are multiple choice questions (with answers) about prehistory.\n\nQ: What is the approximate mean cranial capacity of Homo erectus?\n(A) under 650 cc (B) about 800 cc (C) just under 1000 cc (D) 1200 cc\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. The average cranial capacity of Homo erectus is just under 1000 cubic cm. 
The answer is (C).\n\nQ: According to Timothy Pauketat, the evidence for social stratification and political power at Cahokia suggests:\n(A) a center of Mississippian civilization with conditions similar to the rise of early states. (B) the limitations of authority in a Native American society of egalitarian foragers. (C) a simple chiefdom or perhaps a complex chiefdom had evolved by A.D. 1500. (D) a center of Mississippian civilization with conditions similar to societies on the Northwest Coast of North America.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Timothy Pauketat is known for his research on Cahokia, the center of the Mississippian culture, where he found conditions similar to the rise of early states. The answer is (A).\n\nQ: Recent research on hominid species dating from the Middle Pliocene indicates there was (as of 2020):\n(A) a great amount of species diversity, or a single species that exhibited a lot of diversity. (B) very little species diversity during this period and very few hominids. (C) decreased species diversity due to a prolonged ice age followed by a severe drought. (D) decreased species diversity but increased numbers of hammerstones and flakes, indicating stone tool manufacture.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Recent research has recognized multiple hominid species from the Middle Pliocene, meaning that there was a great amount of species diversity, or diversity within a single species. The answer is (A).\n\nQ: Researchers now believe that the decline of the Maya was caused chiefly by:\n(A) a cataclysm of some kind, such as an earthquake, volcano, or tsunami. (B) ecological degradation resulting from slash-and-burn farming techniques. (C) endless wars between neighboring Mayan city-states. (D) practices of interbreeding that led to a steep rise in congenital disorders.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Researchers believe that the Maya collapse was mainly caused by over-exploitation of natural resources through practices such as slash-and-burn farming. The answer is (B).\n\nQ: The great Mayan king Pacal built temples in the city of Palenque in order to:\n(A) satisfy the powerful Mayan astronomer priests. (B) display his generosity to the common people, since they were allowed to live in the temples. (C) frighten away enemies, in particular the Spaniards. (D) legitimize his kingship, since his father was not royal.\nA: Let's think step by step. We refer to Wikipedia articles on prehistory for help. Pacal built the temples as funerary monuments to legitimize his kingship. The answer is (D).", "professional_accounting": "The following are multiple choice questions (with answers) about professional accounting.\n\nQ: An auditor traces the serial numbers on equipment to a nonissuer\u2019s subledger. Which of the following management assertions is supported by this test?\n(A) Valuation and allocation (B) Completeness (C) Rights and obligations (D) Presentation and disclosure\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The completeness assertion is tested by tracing supporting documents to the record entries. The answer is (B).\n\nQ: One hundred years ago, your great-great-grandmother invested $100 at 5% yearly interest. What is the investment worth today?\n(A) $13,000 (B) $600 (C) $15,000 (D) $28,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. 
A $100 investment at 5% yearly interest is worth 100*(1.05)^100, or about $13,150, after 100 years, which is around $13,000. The answer is (A).\n\nQ: On January 1, year 1, Alpha Co. signed an annual maintenance agreement with a software provider for $15,000 and the maintenance period begins on March 1, year 1. Alpha also incurred $5,000 of costs on January 1, year 1, related to software modification requests that will increase the functionality of the software. Alpha depreciates and amortizes its computer and software assets over five years using the straight-line method. What amount is the total expense that Alpha should recognize related to the maintenance agreement and the software modifications for the year ended December 31, year 1?\n(A) $5,000 (B) $13,500 (C) $16,000 (D) $20,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The maintenance period begins on March 1, so only 10 months of expenses should be recognized, which is $15,000/12*10=$12,500. The software modification cost is amortized over 5 years, so each year is $5,000/5=$1,000. So the total expense is $12,500+$1,000=$13,500. The answer is (B).\n\nQ: Krete is an unmarried taxpayer with income exclusively from wages. By December 31, year 1, Krete's employer has withheld $16,000 in federal income taxes and Krete has made no estimated tax payments. On April 15, year 2, Krete timely filed for an extension request to file her individual tax return, and paid $300 of additional taxes. Krete's year 1 tax liability was $16,500 when she timely filed her return on April 30, year 2, and paid the remaining tax liability balance. What amount would be subject to the penalty for underpayment of estimated taxes?\n(A) $0 (B) $500 (C) $1,650 (D) $16,500\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. The tax due after withholding is $16,500-$16,000=$500, which is less than $1,000, hence there is no penalty for underpayment of estimated taxes. The answer is (A).\n\nQ: Box, a nongovernmental not-for-profit organization, had the following transactions during the year: proceeds from sale of investments, $80,000; purchase of property, plant and equipment, $10,000; proceeds from long-term debt, $100,000; loss on sale of investment, $5,000. What amount should be reported as net cash provided by financing activities in Box's statement of cash flows?\n(A) $70,000 (B) $75,000 (C) $80,000 (D) $100,000\nA: Let's think step by step. We refer to Wikipedia articles on accounting for help. Among the four transactions, only the proceeds from long-term debt belong in the financing activities section of the statement of cash flows, hence the amount reported should be $100,000. The answer is (D).", "professional_law": "The following are multiple choice questions (with answers) about professional law.\n\nQ: A son owed a creditor $5,000. The son's father contacted the creditor and told him that he wanted to pay the son's debt. The father signed a document that stated the father would pay the son's debt at a rate of $500 a month for 10 months. The creditor made no written or oral commitment to forbear to sue the son to collect the $5,000 debt, and the father made no oral or written request for any such forbearance. For the next five months, the father made and the creditor accepted the $500 monthly payments as agreed. During that period, the creditor, in fact, did forbear to take any legal action against the son. However, the father then informed the creditor that he would make no further payments on the debt. 
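The compound-interest and expense-recognition arithmetic in the accounting answers above can be verified the same way; this is again a minimal Python sketch, not part of the prompt data:

# Great-great-grandmother's investment: $100 at 5% compounded yearly for 100 years.
future_value = 100 * 1.05 ** 100             # about 13,150, i.e. around $13,000 (A)

# Alpha Co.: 10 of the 12 maintenance months fall in year 1, plus one year of
# straight-line amortization of the $5,000 modification cost over five years.
maintenance = 15_000 / 12 * 10               # 12500.0
amortization = 5_000 / 5                     # 1000.0
total_expense = maintenance + amortization   # 13500.0, matching answer (B)
assert total_expense == 13_500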
Which of the following is the most persuasive argument that the father is liable to the creditor under the terms of their agreement?\n(A) The father's promise and the creditor's reliance thereon, if proved, gave rise to a valid claim by the creditor against the father based on the doctrine of promissory estoppel. (B) Because it was foreseeable that the father's promise would induce the creditor to forbear taking any action against the son, such forbearance was, as a matter of law, a bargained-for consideration for the father's promise. (C) The father's five payments to the creditor totaling $2,500 manifested a serious intent on the father's part to be contractually bound, and such manifestation is generally recognized as an effective substitute for consideration. (D) By assuming the antecedent debt obligation that the son owed to the creditor, the father became a surety whose promise to the creditor was enforceable, since it was in writing and supported by adequate consideration. \nA: Let's think step by step. We refer to Wikipedia articles on law for help. The doctrine of promissory estoppel stops a person from going back on a promise in contract law, hence option (A) should be the most persuasive argument. The answer is (A).\n\nQ: A state has recently enacted a statute prohibiting the disposal of any nuclear wastes within the state. This law does not contravene or conflict with any federal statutes. A man operates a company in the state that is engaged in the disposal of nuclear wastes. Subsequent to the passage of the state statute, the man, not yet aware of the new law, entered into contracts with many out-of-state firms to dispose of their nuclear wastes in the state. On account of this new law, however, the man will be unable to perform these contracts. Assume that the man has standing to challenge this state law. Which of the following presents his strongest constitutional grounds to challenge the state law prohibiting the disposal of nuclear wastes within the state?\n(A) The commerce clause. (B) The equal protection clause of the Fourteenth Amendment. (C) The privileges and immunities clause of Article IV, Section 2. (D) The contract clause.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The commerce clause states that Congress shall have the power to regulate commerce with foreign Nations, and among the several States, and with the Indian Tribes. The statute affects inter-state commerce which puts it into question. Hence the man's strongest argument should be the commerce clause. The answer is (A).\n\nQ: On October 1, 1980, a developer, owner of several hundred acres in a rural county, drafted a general development plan for the area. The duly recorded plan imposed elaborate limitations and restrictions upon the land in the plan, which was to be developed as a residential district. The restrictions were to extend to all persons acquiring any of the lots and to their heirs, assigns, and lessees. It was further provided that all subsequent owners would be charged with due notice of the restrictions. Among those restrictions in the general plan were the following:(22) A franchise right is created in a strip of land 10 feet in width along the rear of each lot for the use of public utility companies with right of ingress and egress. (23) No house or structure of any kind shall be built on the aforementioned strip of land running through the said blocks. 
In 2000, a retiree purchased one of the lots, built a house, and erected a fence in the rear of his property within the restricted area. In 2004, a teacher purchased a lot adjacent to the retiree's property and built a new house. Two years later, a librarian purchased the lot that adjoined the teacher's property. The three deeds to those properties each contained references to the deed book where the general plan was recorded. In 2008, the librarian began the construction of a seven-foot post-and-rail fence along the line dividing his lot from the teacher's, and along the center of the area subject to the franchise right. Although the teacher objected to its construction, the fence was completed. If the teacher seeks a mandatory injunction to compel removal of the librarian's fence, the court will most likely\n(A) grant relief, because the fence was in violation of the easement restriction. (B) grant relief, because the encroachment of the fence violated the restriction in the original plan. (C) deny relief, because the teacher failed to enforce the restriction against the retiree. (D) deny relief, because the fence would not be construed as \"a structure\" within the terms of the restriction. \nA: Let's think step by step. We refer to Wikipedia articles on law for help. The restrictions in the original plan say that no house or structure of any kind shall be built on the aforementioned strip of land running through the said blocks. Hence the court will most likely grant relief because the fence violated the restriction in the original plan. The answer is (B).\n\nQ: A judge took judicial notice of some facts at the beginning of the trial. Which of the following is not an appropriate kind of fact for judicial notice?\n(A) Indisputable facts. (B) Facts that have been asserted by individual political organizations. (C) Facts recognized to be true by common knowledge. (D) Facts capable of scientific verification.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. Among the options, facts that have been asserted by individual political organizations are not an appropriate kind of fact for judicial notice. The answer is (B).\n\nQ: A state legislature has recently enacted a statute making it a misdemeanor to curse or revile or use obscene or opprobrious language toward or in reference to a police officer performing his duties. A student at a state university organized a demonstration on campus to protest the war. The rally was attended by a group of 50 students who shouted anti-war messages at cars passing by. To show his contempt for the United States, the student sewed the American flag to the rear of his jeans. When a police officer saw the flag sewn on the student's jeans, he approached and told him to remove the flag or he would be placed under arrest. The student became angered and shouted at the police officer, \"Listen, you bastard, I'll wear this rag anywhere I please. \" The student was subsequently placed under arrest and charged with violating the state statute. The student subsequently brings suit in state court challenging the constitutionality of the statute. The strongest constitutional argument for the student is that\n(A) the statute is void for vagueness under the Fourteenth Amendment's due process clause. (B) the statute is invalid because it violates the petitioner's freedom of speech under the First Amendment. 
(C) the statute is an abridgment of freedom of speech under the First Amendment because less restrictive means are available for achieving the same purpose. (D) the statute is overbroad and consequently invalid under the First and Fourteenth Amendments.\nA: Let's think step by step. We refer to Wikipedia articles on law for help. The Fourteenth Amendment further supports the First Amendment by establishing a due process clause. Hence the strongest argument should be that the statute is overbroad and consequently invalid under the First and Fourteenth Amendments. The answer is (D).", "professional_medicine": "The following are multiple choice questions (with answers) about professional medicine.\n\nQ: A 22-year-old male marathon runner presents to the office with the complaint of right-sided rib pain when he runs long distances. Physical examination reveals normal heart and lung findings and an exhalation dysfunction at ribs\u00a04-5 on the right. Which of the following muscles or muscle groups will be most useful in correcting this dysfunction utilizing a direct method?\n(A) anterior scalene (B) latissimus dorsi (C) pectoralis minor (D) quadratus lumborum\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. Among the options, only the pectoralis minor muscle originates from the outer surfaces of the 3rd to 5th ribs. The answer is (C).\n\nQ: A 36-year-old male presents to the office with a\u00a03-week\u00a0history of low back pain. He denies any recent trauma but says that he climbs in and out of his truck numerous times a day for his job. Examination of the patient in the prone position reveals a deep sacral sulcus on the left, a posterior inferior lateral angle on the right, and a lumbosacral junction that springs freely on compression. The most likely diagnosis is\n(A) left-on-left sacral torsion (B) left-on-right sacral torsion (C) right unilateral sacral flexion (D) right-on-right sacral torsion\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The deep sulcus on the left and posterior ILA on the right, with a negative spring test, suggest a right-on-right sacral torsion. All other options have a deep sulcus on the right. The answer is (D).\n\nQ: A 44-year-old man comes to the office because of a 3-day history of sore throat, nonproductive cough, runny nose, and frontal headache. He says the headache is worse in the morning and ibuprofen does provide some relief. He has not had shortness of breath. Medical history is unremarkable. He takes no medications other than the ibuprofen for pain. Vital signs are temperature 37.4\u00b0C (99.4\u00b0F), pulse 88/min, respirations 18/min, and blood pressure 120/84 mm Hg. Examination of the nares shows erythematous mucous membranes. Examination of the throat shows erythema and follicular lymphoid hyperplasia on the posterior oropharynx. There is no palpable cervical adenopathy. Lungs are clear to auscultation. Which of the following is the most likely cause of this patient's symptoms?\n(A) Allergic rhinitis (B) Epstein-Barr virus (C) Mycoplasma pneumonia (D) Rhinovirus\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The symptoms, especially the headache, suggest that the most likely cause is Rhinovirus. Epstein-Barr virus will cause swollen lymph nodes, but there is no palpable cervical adenopathy. That the lungs are clear to auscultation suggests it is not Mycoplasma pneumonia. 
The answer is (D).\n\nQ: A previously healthy 32-year-old woman comes to the physician 8 months after her husband was killed in a car crash. Since that time, she has had a decreased appetite and difficulty falling asleep. She states that she is often sad and cries frequently. She has been rechecking the door lock five times before leaving her house and has to count exactly five pieces of toilet paper before she uses it. She says that she has always been a perfectionist but these urges and rituals are new. Pharmacotherapy should be targeted to which of the following neurotransmitters?\n(A) Dopamine (B) Glutamate (C) Norepinephrine (D) Serotonin\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The patient feels sad, and among the options, only Dopamine and Serotonin can help increase positive emotions. Serotonin also affects digestion and metabolism, which can help the patient's decreased appetite and sleep difficulty. The answer is (D).\n\nQ: A 42-year-old man comes to the office for preoperative evaluation prior to undergoing adrenalectomy scheduled in 2 weeks. One month ago, he received care in the emergency department for pain over his right flank following a motor vehicle collision. At that time, blood pressure was 160/100 mm Hg and CT scan of the abdomen showed an incidental 10-cm left adrenal mass. Results of laboratory studies, including complete blood count, serum electrolyte concentrations, and liver function tests, were within the reference ranges. The patient otherwise had been healthy and had never been told that he had elevated blood pressure. He takes no medications. A follow-up visit in the office 2 weeks ago disclosed elevated urinary normetanephrine and metanephrine and plasma aldosterone concentrations. The patient was referred to a surgeon, who recommended the adrenalectomy. Today, vital signs are temperature 36.6\u00b0C (97.9\u00b0F), pulse 100/min, respirations 14/min, and blood pressure 170/95 mm Hg. Physical examination discloses no significant findings. Initial preoperative preparation should include treatment with which of the following?\n(A) Labetalol (B) A loading dose of potassium chloride (C) Nifedipine (D) Phenoxybenzamine\nA: Let's think step by step. We refer to Wikipedia articles on medicine for help. The symptoms and the adrenal mass suggest pheochromocytoma, and the blood pressure indicates hypertension. Phenoxybenzamine is used to treat hypertension caused by pheochromocytoma. The answer is (D).", "professional_psychology": "The following are multiple choice questions (with answers) about professional psychology.\n\nQ: In the construction of a multiple regression equation for purposes of prediction, the optimal combination of measures is one in which the predictors\n(A) are uncorrelated with each other but are moderately correlated with the criterion (B) have low correlations with each other and low correlations with the criterion (C) are highly intercorrelated with each other and moderately correlated with the criterion (D) have low correlations with the criterion but are moderately correlated with each other\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The basis of multiple regression is to assess the relationship between one continuous variable and a set of independent variables. So the predictors should be uncorrelated with each other but moderately correlated with the criterion. 
The answer is (A).\n\nQ: There are three ways to measure the Central Tendency: the Mean, the Median and the Mode. From your knowledge about them, what is the mode?\n(A) less sensitive to extreme scores than the mean (B) more useful for skewed distributions (C) sensitive to extreme values and highly skewed distributions (D) the most frequently occurring number\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The definition of the mode is the most frequently occurring number. The answer is (D).\n\nQ: Carl Jung believed that a client's transference:\n(A) is a fantasy that distracts the client from reality. (B) represents \u201cmixed feelings\u201d toward the therapist. (C) is a form of \"acting out.\" (D) reflects the client\u2019s personal and collective unconscious.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Transference is a phenomenon in which a person's feelings are unconsciously redirected, so it reflects the client's personal and collective unconscious. The answer is (D).\n\nQ: In terms of Hofstede\u2019s (1980) five cultural dimensions, the United States scores at the top of the scale on:\n(A) individualism. (B) individualism and power distance. (C) power distance and masculinity. (D) uncertainty avoidance.\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. The US scores highest on individualism among the five cultural dimensions. The answer is (A).\n\nQ: One of your therapy clients asks your advice about a good weight-reduction program. You have investigated the programs in the community and are enrolled in the one you consider the best. This program offers a $50 bonus to its patrons for each new person they bring into the program. Under these circumstances, your most appropriate response would be to\n(A) tell your client the pros and cons of each program you know about except for the one in which you are enrolled (B) recommend to your client the program in which you are enrolled and explain the $50 bonus you will receive (C) recommend to your client the program in which you are enrolled and offer to have the $50 bonus credited to your client's account in the program (D) tell your client the pros and cons of each program you know about, but do not claim the $50 bonus if your client enrolls in your program\nA: Let's think step by step. We refer to Wikipedia articles on psychology for help. Based on the circumstances, you should tell your client about the pros and cons of each program, but it would be inappropriate to receive the bonus, so you should not claim the $50 bonus. The answer is (D).", "public_relations": "The following are multiple choice questions (with answers) about public relations.\n\nQ: Earth Hour was a campaign launched by which organization?\n(A) Greenpeace (B) The UN (C) Oxfam (D) World Wildlife Fund\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. Earth Hour is a worldwide movement organized and launched by the World Wildlife Fund. The answer is (D).\n\nQ: In issues management, what is the most proactive approach to addressing negative or misleading information posted online about your organization?\n(A) Buy domain names that could be used by opposition groups. (B) Post anonymous comments on blogs to combat this information. (C) Prepare a news release that discredits the inaccurate information. (D) Make policy changes to address complaints highlighted on these sites.\nA: Let's think step by step. 
We refer to Wikipedia articles on public relations for help. In issues management, the most proactive approach to addressing negative or misleading information posted online is to make policy changes to address complaints highlighted on those sites. The answer is (D).\n\nQ: At which stage in the planning process would a situation analysis be carried out?\n(A) Defining the program (B) Planning the program (C) Taking action and implementing ideas (D) Evaluation of the program\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. Situation analyses are typically carried out during the planning process stage of defining the program. The answer is (A).\n\nQ: Which of these statements is true of the Vatican in 2010 at the time of the accusations of child abuse cover-ups?\n(A) There was a coordinated media response. (B) Consistent messages were communicated. (C) Criticisms were taken as attacks on the Catholic Church. (D) The credibility of the Vatican was upheld.\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. In 2010 when there were accusations of child abuse cover-ups, the Vatican took those criticisms as attacks on the Catholic Church. The answer is (C).\n\nQ: What should a public relations media practitioner do if she does not know the answer to a reporter's question?\n(A) Give the reporter other information she is certain is correct. (B) Say that the information is 'off the record' and will be disseminated later. (C) Say 'I don't know' and promise to provide the information later. (D) Say 'no comment,' rather than appear uninformed.\nA: Let's think step by step. We refer to Wikipedia articles on public relations for help. If a public relations media practitioner does not know the answer to a reporter's question, they should say 'I don't know' and offer to provide the information later. The answer is (C).", "security_studies": "The following are multiple choice questions (with answers) about security studies.\n\nQ: What are the frameworks of analysis within which terrorism has been considered (as of 2020)?\n(A) Competition between larger nations has resulted in some countries actively supporting terrorist groups to undermine the strength of rival states. Terrorist networks are extended patronage clubs maintained and paid for by their donor states and are conceptualised as being like state actors, to be dealt with using military force. (B) Globalization has enabled the internationalization of terrorist activities by opening up their operational space, although coordination is still managed from a geographical base. This suggests that terrorist groups are nationally structured which means that terrorism cannot be considered in terms of a war to be defeated militarily without having serious implications on the indigenous population. (C) Terrorism can be viewed as a problem to be resolved by military means (war on terrorism), by normal police techniques (terrorism as crime), or as a medical problem with underlying causes and symptoms (terrorism as disease). (D) Terrorism is viewed as a criminal problem. The criminalization of terrorism has two important implications. Firstly, it suggests that terrorism can be eradicated - terrorists can be caught and brought to trial by normal judicial proceedings thereby removing the threat from society - and secondly, it suggests that preventative crime techniques are applicable to prevent its development.\nA: Let's think step by step. 
We refer to Wikipedia articles on security studies for help. (A) is wrong because it is not competition between larger nations that causes terrorism. \n(B) is wrong because globalization is not the cause of terrorism.\n(C) is correct because terrorism has indeed been framed as a war to be fought militarily, a crime to be handled by police techniques, and a disease to be treated at its underlying causes. \n(D) is wrong because preventative crime techniques will likely not end terrorism. The answer is (C).\n\nQ: Which of the following is the best lens through which to investigate the role of child soldiers?\n(A) Child soldiers are victims of combat that need re-education and rehabilitation. (B) Children and their mothers are not active subjects in warfare and are best considered as subjects in the private sphere. (C) Children are most often innocent bystanders in war and are best used as signifiers of peace. (D) Children have political subjecthood that is missed when they are considered as passive victims of warfare.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Child soldiers' political subjecthood can be missed when they are considered passive victims of warfare. The answer is (D).\n\nQ: How can we best describe the relationship between the state-centric approach and the concept of human security?\n(A) There are such wide divisions within the human security framework regarding the nature of threats and referent objects that no widely applicable comparisons between state-centric approaches and human security can be drawn. (B) By adopting the framework of human security, the limitations of the realist state-centric approach become evident. Whilst human security defines the referent object as the person or population, state-centric approaches prioritise the security of the state, de-prioritizing the pursuit of human security. (C) The state-centric approach to security is a faction of human security, usually defined within the broad school of human security. By being state-centric this approach prioritises the individual as the referent object in security studies. (D) Both the state-centric and human-centric approaches to security are mutually exclusive and offer a sufficient analytic framework with which to understand the international security system. It is therefore the role of security analysts to determine which of these substantial concepts is correct, and which should be discarded.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Human security focuses on a person or population, whereas state-centric approaches focus on the state while deprioritizing human security. The answer is (B).\n\nQ: In order to become securitized, a threat must be presented in which of these ways?\n(A) As an existential threat that requires immediate and extraordinary action, posing a threat to the survival of the state or to societal security. (B) As requiring immediate and extraordinary action by the state, threatening the survival of a referent object and therefore warranting the use of measures not normally employed in the political realm. (C) As an urgent threat to the survival of the referent object, so serious that it legitimises the employment of extraordinary action in response. (D) As an urgent threat to the survival of the audience that requires extraordinary or emergency measures.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. To be securitized, a threat must be an urgent threat to the survival of the referent object. 
The answer is (C).\n\nQ: What distinguishes coercive diplomacy from military force?\n(A) Compellence is another term for coercive diplomacy, but covering a narrower set of criteria; compellence covers those threats aimed at initiating adversary action. A threat to coerce a state to give up part of its territory would count as coercive diplomacy, as long as that threat proactively initiates action before reactive diplomacy is taken. (B) Coercive diplomacy constitutes the threats of limited force to induce adversary's incentive to comply with the coercer's demands. It is an influence strategy that is intended to obtain compliance: the use of force to defeat an opponent first does not count. It leaves an element of choice with the target to comply, or to continue. (C) Military force, or the threat of military force, utilises fear to achieve strategic objectives. Coercive diplomacy is differentiated from this approach, because it does not use fear as a tool for coercing an adversary. (D) Coercive diplomacy is employed to use force but to limit its effects on the international community. Coercive diplomacy is an aggressive strategy that is intended to obtain compliance through defeat. It does not leave an element of choice with the target, the target either being forced to comply or engage in conflict. It seeks to control by imposing compliance by removing any opportunity for negotiation or concession.\nA: Let's think step by step. We refer to Wikipedia articles on security studies for help. Coercive diplomacy uses the threat of force to induce the opponent to comply with demands. The answer is (B).", "sociology": "The following are multiple choice questions (with answers) about sociology.\n\nQ: Which of the following is not a problem associated with official statistics on strike action?\n(A) most strikes go unnoticed by employers and the mass media (B) not all industrial disputes will be reported by the employer (C) the definition of strikes excludes those that involve fewer than ten workers or last less than one day (D) it is hard to compare strikes that were measured in different ways\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Official statistics on strike action can be problematic because not all industrial disputes will be reported by employers, the definition of strikes excludes those that involves fewer than ten workers or last less than one day, and it is hard to compare strikes that were measured in different ways. Thus, (A) is not a problem associated with official statistics on strike action. The answer is (A).\n\nQ: What does Berger (1963) describe as a metaphor for social reality?\n(A) a fairground ride (B) a circus (C) a puppet theatre (D) a ballet\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Berger describes social reality using the metaphor of a puppet theatre. The answer is (C).\n\nQ: The term 'hegemony' refers to:\n(A) the tendency for the working class not to realize their own interests (B) a dominant ideology that legitimates economic, political and cultural power (C) a form of dual consciousness based on ideology and everyday experiences (D) a mode of payment given for outstanding topiary\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. Hegemony refers to a dominant ideology that legitimates economic, policital, and cultural power. 
The answer is (B).\n\nQ: The shift from 'civil religion' to 'common religion' means that:\n(A) the increasing bureaucracy of the state has made religion only a marginal part of our lives (B) despite the weakening of traditional authority, our everyday lives and 'common sense' remain shaped by religious beliefs and values (C) religious participation in collective worship may have declined, but people still practise their faiths in private (D) people are much more likely to discuss their religious beliefs in public, informal settings\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. The shift from civil religion to common religion means that despite the weakening of traditional authority, our everyday lives and common sense remain shaped by religious beliefs and values. The answer is (B).\n\nQ: Which of the following did the post-war welfare state of 1948 not aim to provide:\n(A) free health care and education for all (B) a minimum wage (C) full employment (D) universal welfare\nA: Let's think step by step. We refer to Wikipedia articles on sociology for help. The post-war welfare state of 1948 aimed to provide free healthcare and education, full employment, and universal welfare. But it did not aim to provide a minimum wage. The answer is (B).", "us_foreign_policy": "The following are multiple choice questions (with answers) about us foreign policy.\n\nQ: How did Donald Trump attack globalization in the 2016 campaign?\n(A) Globalization had made men like him too rich (B) Globalization only benefited certain American states, such as New York (C) Liberal elites had encouraged globalization, while 'ordinary Americans' lost jobs because of it (D) Globalization encouraged damaging trade wars\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. Trump attacked globalization because he believed ordinary Americans lost jobs due to it, and so he wanted to blame liberals who had encouraged it. The answer is (C).\n\nQ: How did NSC-68 change U.S. strategy?\n(A) It globalized containment. (B) It militarized containment. (C) It called for the development of the hydrogen bomb. (D) All of the above\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. NSC-68 outlined a variety of courses of action, including globalization of containment, militarization of contaiment, and the development of the hydrogen bomb. The answer is (D).\n\nQ: How do Defensive Realism and Offensive Realism differ in their explanation of state behaviour?\n(A) Defensive realists place greater emphasis on the role of international institutions (B) Defensive realists place less emphasis on geographical factors (C) Offensive realists give more priority to the national interest than Defensive realists. (D) Defensive realists believe states are security maximizers, while Offensive realists believe states to be power maximizers\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. While defensive realism advocates that states are security maximizers, offensive realists think of states as power maximizers. The answer is (D).\n\nQ: The realm of policy decisions concerned primarily with relations between the United States and the rest of the world is known as\n(A) terrorism policy. (B) economic policy. (C) foreign policy. (D) international policy.\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. 
The topic of policy decisions concerns with relations between the US and the rest of the world is known as foreign policy. The answer is (C).\n\nQ: How did the 2008 financial crisis affect America's international reputation?\n(A) It damaged support for the US model of political economy and capitalism (B) It created anger at the United States for exaggerating the crisis (C) It increased support for American global leadership under President Obama (D) It reduced global use of the US dollar\nA: Let's think step by step. We refer to Wikipedia articles on us foreign policy for help. The 2008 financial crisis damanged the international reputation of the American model of political economy and capitalism. The answer is (A).", "virology": "The following are multiple choice questions (with answers) about virology.\n\nQ: The median survival time to AIDS and death was established by following:\n(A) Seroprevalent HIV-infected individuals (B) Seronegatives (C) Seroconverters (D) High-risk seronegatives\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The median survival time to AIDS and death was established as a result of the development of seroconverters. The answer is (C).\n\nQ: Which of the following is a morphological characteristic of the paramyxoviruses.\n(A) Fragile viruses often visualised with RNA spewing from the inside (B) Elongate viruses (C) Icosahedral viruses with envelope (D) Very large viruses\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. Paramyxoviruses are fragile viruses often visualised with RNA spewing from the inside. The answer is (A).\n\nQ: The most important goal of a behavioral intervention is:\n(A) Change in behavior (B) Comprehensive coverage (C) Effective use of behavioral theory (D) Sustained behavior change\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The prim goal of a behavioral intervention is to cause sustained behavior change. The answer is (D).\n\nQ: A key factor facilitating the application of nested case-control studies from the MACS was:\n(A) Data collection (B) Establishment of a repository of biologic specimens (C) Participant interest (D) Administration of the questionnaire by staff\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. The Multicenter AIDS Cohort Study's use of nested case-control studies was facilitated by the establishment of a repository of biologic specimens. The answer is (B).\n\nQ: Why are parvoviruses a highly impactful parasite?\n(A) Because they have no nucleic acid (B) They require a helper virus (C) Only replicate in dividing cells (D) Can integrate into host chromosomes\nA: Let's think step by step. We refer to Wikipedia articles on virology for help. Paroviruses are highly impactful because they do not have nucleic acid. The answer is (A).", "world_religions": "The following are multiple choice questions (with answers) about world religions.\n\nQ: How can the Upanishads be characterized?\n(A) Ritual texts (B) Philosophical texts (C) Hymns (D) Origin stories\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The Upanishads are the most recent part of Vedas (the oldest scriptures in Hinduism) and supplied the basis of later Hindu philosophy. So they are philosophical texts. The answer is (B).\n\nQ: What is the Second Gem in Buddhism?\n(A) The Dharma (B) The Sangha (C) The Buddha (D) The Bodhisattva\nA: Let's think step by step. 
We refer to Wikipedia articles on world religions for help. The Second Gem in Buddhism is The Dharma. The answer is (A).\n\nQ: Which Japanese government promoted a kind of national cult based on the emperor and his associations with kami?\n(A) Honen (B) Tanaka (C) Tokugawa (D) Meiji\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The promotion of a national cult based on the emperor and his associations with Kami happened during the reign of Emperor Meiji (1852-1912). The answer is (D).\n\nQ: In which dynasty was the \"Mandate of Heaven\" developed to legitimatize the new rulers?\n(A) Shang (B) Zhou (C) Han (D) Xia\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. The \"Mandate of Heaven\" was developed as an ancient Chinese philosophical concept during the Zhou Dynasty (1046-256 BCE). The answer is (B).\n\nQ: What is the sign of the covenant for Jewish males?\n(A) The rainbow (B) Circumcision (C) A son (D) Bar mitzvah\nA: Let's think step by step. We refer to Wikipedia articles on world religions for help. In Judaism, the most distinctive sign of the covenant is circumcision (brit milah). The answer is (B)."} diff --git a/lm_eval/tasks/mmlu/_generate_configs.py b/lm_eval/tasks/mmlu/_generate_configs.py index f0dbd6bd..542e11b2 100644 --- a/lm_eval/tasks/mmlu/_generate_configs.py +++ b/lm_eval/tasks/mmlu/_generate_configs.py @@ -74,12 +74,8 @@ SUBJECTS = [ def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--base_yaml_path", required=True) - parser.add_argument( - "--save_prefix_path", default="flan" - ) - parser.add_argument( - "--cot_prompt_path", default=None - ) + parser.add_argument("--save_prefix_path", default="flan") + parser.add_argument("--cot_prompt_path", default=None) parser.add_argument("--task_prefix", default="") return parser.parse_args() @@ -95,6 +91,7 @@ if __name__ == "__main__": if args.cot_prompt_path is not None: import json + with open(args.cot_prompt_path) as f: cot_file = json.load(f) @@ -106,7 +103,9 @@ if __name__ == "__main__": yaml_dict = { "include": base_yaml_name, - "task": f"mmlu_{args.task_prefix}_{subject}" if args.task_prefix != "" else f"mmlu_{subject}", + "task": f"mmlu_{args.task_prefix}_{subject}" + if args.task_prefix != "" + else f"mmlu_{subject}", "dataset_name": subject, "description": description, } @@ -114,4 +113,10 @@ if __name__ == "__main__": file_save_path = args.save_prefix_path + f"_{subject}.yaml" eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") with open(file_save_path, "w") as yaml_file: - yaml.dump(yaml_dict, yaml_file, width=float("inf"), allow_unicode=True, default_style='"') + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm_eval/tasks/mmlu/default/_default_template_yaml b/lm_eval/tasks/mmlu/default/_default_template_yaml index 757f7b5e..1064a2d1 100644 --- a/lm_eval/tasks/mmlu/default/_default_template_yaml +++ b/lm_eval/tasks/mmlu/default/_default_template_yaml @@ -12,4 +12,4 @@ metric_list: higher_is_better: true - metric: acc_norm aggregation: mean - higher_is_better: true \ No newline at end of file + higher_is_better: true diff --git a/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml b/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml index ffa9ee87..8461b93a 100644 --- a/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml +++ 
b/lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml @@ -21,4 +21,4 @@ metric_list: aggregation: mean higher_is_better: true ignore_case: true - ignore_punctuation: true \ No newline at end of file + ignore_punctuation: true diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml index 2d5d92ef..5db2981a 100644 --- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml +++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml @@ -12,4 +12,4 @@ metric_list: higher_is_better: true - metric: acc_norm aggregation: mean - higher_is_better: true \ No newline at end of file + higher_is_better: true -- GitLab From c640376551f198e598d930333517010322b87527 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 21 Sep 2023 14:12:40 +0000 Subject: [PATCH 220/264] update --- lm_eval/tasks/benchmarks/flan_held_out.yaml | 8 +- lm_eval/tasks/benchmarks/t0_eval.yaml | 148 ++++++++++---------- 2 files changed, 78 insertions(+), 78 deletions(-) diff --git a/lm_eval/tasks/benchmarks/flan_held_out.yaml b/lm_eval/tasks/benchmarks/flan_held_out.yaml index f61affea..150e9477 100644 --- a/lm_eval/tasks/benchmarks/flan_held_out.yaml +++ b/lm_eval/tasks/benchmarks/flan_held_out.yaml @@ -3,10 +3,10 @@ task: # BBH - bbh_flan_zeroshot - bbh_flan_fewshot - - bbh_flan_cot_fewshot - - bbh_flan_cot_zeroshot + # - bbh_flan_cot_fewshot + # - bbh_flan_cot_zeroshot # MMLU - mmlu_flan_n_shot_generative - mmlu_flan_n_shot_loglikelihood - - mmlu_flan_cot_zeroshot - - mmlu_flan_cot_fewshot + # - mmlu_flan_cot_zeroshot + # - mmlu_flan_cot_fewshot diff --git a/lm_eval/tasks/benchmarks/t0_eval.yaml b/lm_eval/tasks/benchmarks/t0_eval.yaml index 9cd25b51..788122e9 100644 --- a/lm_eval/tasks/benchmarks/t0_eval.yaml +++ b/lm_eval/tasks/benchmarks/t0_eval.yaml @@ -26,80 +26,80 @@ task: higher_is_better: true ignore_case: true ignore_punctuation: true - # # Natural Language Inference - # - dataset_path: super_glue - # dataset_name: cb - # use_prompt: promptsource:* - # training_split: train - # validation_split: validation - # output_type: greedy_until - # metric_list: - # - metric: exact_match - # aggregation: mean - # higher_is_better: true - # ignore_case: true - # ignore_punctuation: true - # - dataset_path: super_glue - # dataset_name: rte - # use_prompt: promptsource:* - # training_split: train - # validation_split: validation - # output_type: greedy_until - # metric_list: - # - metric: exact_match - # aggregation: mean - # higher_is_better: true - # ignore_case: true - # ignore_punctuation: true - # - task: anli_r1 - # dataset_path: anli - # use_prompt: promptsource:* - # training_split: train_r1 - # validation_split: dev_r1 - # output_type: greedy_until - # metric_list: - # - metric: exact_match - # aggregation: mean - # higher_is_better: true - # ignore_case: true - # ignore_punctuation: true - # - task: anli_r2 - # dataset_path: anli - # use_prompt: promptsource:* - # training_split: train_r2 - # validation_split: dev_r2 - # output_type: greedy_until - # metric_list: - # - metric: exact_match - # aggregation: mean - # higher_is_better: true - # ignore_case: true - # ignore_punctuation: true - # - task: anli_r3 - # dataset_path: anli - # use_prompt: promptsource:* - # training_split: train_r3 - # validation_split: dev_r3 - # output_type: greedy_until - # metric_list: - # - metric: exact_match - # aggregation: mean - # higher_is_better: true - # ignore_case: true - # 
ignore_punctuation: true - # # Sentence Completion - # - dataset_path: super_glue - # dataset_name: copa - # use_prompt: promptsource:* - # training_split: train - # validation_split: validation - # output_type: greedy_until - # metric_list: - # - metric: exact_match - # aggregation: mean - # higher_is_better: true - # ignore_case: true - # ignore_punctuation: true + # Natural Language Inference + - dataset_path: super_glue + dataset_name: cb + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: greedy_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - dataset_path: super_glue + dataset_name: rte + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: greedy_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - task: anli_r1 + dataset_path: anli + use_prompt: promptsource:* + training_split: train_r1 + validation_split: dev_r1 + output_type: greedy_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - task: anli_r2 + dataset_path: anli + use_prompt: promptsource:* + training_split: train_r2 + validation_split: dev_r2 + output_type: greedy_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - task: anli_r3 + dataset_path: anli + use_prompt: promptsource:* + training_split: train_r3 + validation_split: dev_r3 + output_type: greedy_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + # Sentence Completion + - dataset_path: super_glue + dataset_name: copa + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: greedy_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true # Natural Language Inference - dataset_path: hellaswag use_prompt: promptsource:* -- GitLab From c8f56b1788ab5f08e7bf9da9cadb18d6711e3161 Mon Sep 17 00:00:00 2001 From: Hailey Schoelkopf <65563625+haileyschoelkopf@users.noreply.github.com> Date: Thu, 21 Sep 2023 11:04:26 -0400 Subject: [PATCH 221/264] Note [math] extra --- lm_eval/tasks/minerva_math/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/minerva_math/README.md b/lm_eval/tasks/minerva_math/README.md index fb2845e2..7ca5d652 100644 --- a/lm_eval/tasks/minerva_math/README.md +++ b/lm_eval/tasks/minerva_math/README.md @@ -6,7 +6,7 @@ https://arxiv.org/abs/2103.03874 Many intellectual endeavors require mathematical problem solving, but this skill remains beyond the capabilities of computers. To measure this ability in machine learning models, we introduce MATH, a new dataset of 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations. 
-NOTE: The few-shot and the generated answer extraction is based on the [Minerva](https://arxiv.org/abs/2206.14858) and exact match equivalence is calculated using the `sympy` library +NOTE: The few-shot and the generated answer extraction is based on the [Minerva](https://arxiv.org/abs/2206.14858) and exact match equivalence is calculated using the `sympy` library. This requires additional dependencies, which can be installed via the `lm-eval[math]` extra. Homepage: https://github.com/hendrycks/math -- GitLab From 2906a565108f642e4d5eea51269a62f12d8f7f6a Mon Sep 17 00:00:00 2001 From: Hailey Schoelkopf <65563625+haileyschoelkopf@users.noreply.github.com> Date: Thu, 21 Sep 2023 11:05:43 -0400 Subject: [PATCH 222/264] Add [math] optional dependency, make antlr req more stringent --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a73505d8..16bc2b65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,8 +36,6 @@ dependencies = [ "tqdm-multiprocess", "transformers>=4.1", "zstandard", - "sympy>=1.12", - "antlr4-python3-runtime >= 4.11, < 4.12" ] [tool.setuptools] @@ -66,6 +64,7 @@ linting = [ ] testing = ["pytest", "pytest-cov", "pytest-xdist"] multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"] +math = ["sympy>=1.12", "antlr4-python3-runtime==4.11"] sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1"] promptsource = [ "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource" -- GitLab From 43c91b5ca84df830176f2f63455a500f155c8c8d Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 22 Sep 2023 07:31:20 -0400 Subject: [PATCH 223/264] hotfix bugs --- lm_eval/api/task.py | 2 +- lm_eval/tasks/minerva_math/utils.py | 10 ++++++++-- lm_eval/utils.py | 10 +++++++++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index b2081579..42dbeb93 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -631,7 +631,7 @@ class ConfigurableTask(Task): if self.fewshot_docs() is not None: self.sampler = samplers.get_sampler( - self.config.fewshot_config.get("sampler", "default") + self.config.fewshot_config.get("sampler", "default") if self.config.fewshot_config else "default" )(list(self.fewshot_docs()), self, rnd=random.Random(1234)) if self.has_test_docs(): diff --git a/lm_eval/tasks/minerva_math/utils.py b/lm_eval/tasks/minerva_math/utils.py index 87684765..3654b9a6 100644 --- a/lm_eval/tasks/minerva_math/utils.py +++ b/lm_eval/tasks/minerva_math/utils.py @@ -1,11 +1,17 @@ import datasets import re -import sympy -from sympy.parsing.latex import parse_latex import signal from lm_eval.logger import eval_logger from typing import Optional, List, Dict +try: + import sympy + from sympy.parsing.latex import parse_latex +except ModuleNotFoundError: + raise Exception( + "`sympy` is required for checking answer equivalence on the Minerva MATH tasks. \ +please install sympy via pip install lm-eval[math] or pip install -e .[math]", + ) # taken from # https://github.com/wellecks/lm-evaluation-harness/blob/master/lm_eval/tasks/minerva_math.py def doc_to_text(doc: dict) -> str: diff --git a/lm_eval/utils.py b/lm_eval/utils.py index ec86166e..80a85ffb 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -45,6 +45,14 @@ def escaped_split(text, sep_char, maxsplit=-1): return re.split(r"(?
Date: Fri, 22 Sep 2023 08:00:55 -0400 Subject: [PATCH 224/264] trigger precommit --- lm_eval/api/task.py | 4 +++- lm_eval/utils.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 42dbeb93..3f5d18dc 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -631,7 +631,9 @@ class ConfigurableTask(Task): if self.fewshot_docs() is not None: self.sampler = samplers.get_sampler( - self.config.fewshot_config.get("sampler", "default") if self.config.fewshot_config else "default" + self.config.fewshot_config.get("sampler", "default") + if self.config.fewshot_config + else "default" )(list(self.fewshot_docs()), self, rnd=random.Random(1234)) if self.has_test_docs(): diff --git a/lm_eval/utils.py b/lm_eval/utils.py index 80a85ffb..71189aa0 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -63,7 +63,9 @@ def simple_parse_args_string(args_string): if not args_string: return {} arg_list = [arg for arg in args_string.split(",") if arg] - args_dict = {k: handle_arg_string(v) for k, v in [arg.split("=") for arg in arg_list]} + args_dict = { + k: handle_arg_string(v) for k, v in [arg.split("=") for arg in arg_list] + } return args_dict -- GitLab From d98159b2dfe731736b1aea759282ff32f51eb0e7 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Fri, 22 Sep 2023 08:04:36 -0400 Subject: [PATCH 225/264] trigger precommit --- lm_eval/tasks/minerva_math/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lm_eval/tasks/minerva_math/utils.py b/lm_eval/tasks/minerva_math/utils.py index 3654b9a6..293fa909 100644 --- a/lm_eval/tasks/minerva_math/utils.py +++ b/lm_eval/tasks/minerva_math/utils.py @@ -13,6 +13,7 @@ except ModuleNotFoundError: please install sympy via pip install lm-eval[math] or pip install -e .[math]", ) + # taken from # https://github.com/wellecks/lm-evaluation-harness/blob/master/lm_eval/tasks/minerva_math.py def doc_to_text(doc: dict) -> str: -- GitLab From 94346b7ed8a699444d5f7b9defbb424134a45d7a Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 17:52:33 +0200 Subject: [PATCH 226/264] Allow forced import of metrics from the HF Evaluate library --- lm_eval/api/registry.py | 29 ++++++++++++++--------------- lm_eval/api/task.py | 5 +++-- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/lm_eval/api/registry.py b/lm_eval/api/registry.py index 85d30f0f..e5da4b44 100644 --- a/lm_eval/api/registry.py +++ b/lm_eval/api/registry.py @@ -117,24 +117,23 @@ def register_metric(**args): return decorate -def get_metric(name): +def get_metric(name, hf_evaluate_metric=False): + if not hf_evaluate_metric: + if name in METRIC_REGISTRY: + return METRIC_REGISTRY[name] + else: + eval_logger.warning( + f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..." + ) + try: - return METRIC_REGISTRY[name] - except KeyError: - # TODO: change this print to logging? - print( - f"Could not find registered metric '{name}' in lm-eval, \ -searching in HF Evaluate library..." + metric_object = evaluate.load(name) + return metric_object.compute + except Exception: + eval_logger.error( + f"{name} not found in the evaluate library! 
Please check https://huggingface.co/evaluate-metric", ) - try: - metric_object = evaluate.load(name) - return metric_object.compute - except Exception: - eval_logger.error( - "{} not found in the evaluate library!".format(name), - "Please check https://huggingface.co/evaluate-metric", - ) def register_aggregation(name): diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 3f5d18dc..ba4ac150 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -555,8 +555,9 @@ class ConfigurableTask(Task): kwargs = { key: metric_config[key] for key in metric_config - if key not in ["metric", "aggregation", "higher_is_better"] + if key not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"] } + hf_evaluate_metric = "hf_evaluate" in metric_config and metric_config["hf_evaluate"] == True if self.config.process_results is not None: self._metric_fn_list[metric_name] = None @@ -567,7 +568,7 @@ class ConfigurableTask(Task): self._metric_fn_list[metric_name] = metric_fn self._metric_fn_kwargs[metric_name] = kwargs else: - self._metric_fn_list[metric_name] = get_metric(metric_name) + self._metric_fn_list[metric_name] = get_metric(metric_name, hf_evaluate_metric) self._metric_fn_kwargs[metric_name] = kwargs if "aggregation" in metric_config: -- GitLab From 5b26b3b0e01f5d2bf4969ce32dd0cb0b1d7e4549 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 17:57:49 +0200 Subject: [PATCH 227/264] Cast gold to the same type as result --- lm_eval/api/task.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index ba4ac150..d1aa8c58 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -1069,6 +1069,7 @@ class ConfigurableTask(Task): elif self.OUTPUT_TYPE == "greedy_until": gold = self.doc_to_target(doc) + result = results[0] if self.config.doc_to_choice is not None: # If you set doc_to_choice, # it assumes that doc_to_target returns a number. @@ -1077,10 +1078,10 @@ class ConfigurableTask(Task): # we expect multiple_targets to be a list. elif self.multiple_target: gold = list(gold) - else: - gold = str(gold) + elif type(gold) != type(result): + # cast gold to the same type as result + gold = type(result)(gold) - result = results[0] for metric in self._metric_fn_list.keys(): if self.multiple_target: # in the case where we have multiple targets, -- GitLab From 90b261e5786ec7d1dcc0f3ef26431d5142dfa7bd Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 18:01:25 +0200 Subject: [PATCH 228/264] Add transformation filters: lowercase, uppercase, map --- lm_eval/filters/__init__.py | 4 +++ lm_eval/filters/transformation.py | 47 +++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 lm_eval/filters/transformation.py diff --git a/lm_eval/filters/__init__.py b/lm_eval/filters/__init__.py index 54578cb4..c74ac015 100644 --- a/lm_eval/filters/__init__.py +++ b/lm_eval/filters/__init__.py @@ -1,6 +1,7 @@ from lm_eval.api.filter import FilterEnsemble from . import selection from . import extraction +from . import transformation FILTER_REGISTRY = { @@ -9,6 +10,9 @@ FILTER_REGISTRY = { "majority_vote": selection.MajorityVoteFilter, "take_first_k": selection.TakeKFilter, "remove_whitespace": extraction.WhitespaceFilter, + "lowercase": transformation.LowercaseFilter, + "uppercase": transformation.UppercaseFilter, + "map": transformation.MapFilter, # TODO: implement this filter. 
either it should take in an arbitrary "scoring"/reward function # that takes an input and returns a scalar and then should select the max reward, # or should implement different filters for different ways of handling a reward model's inference. diff --git a/lm_eval/filters/transformation.py b/lm_eval/filters/transformation.py new file mode 100644 index 00000000..16e31a7c --- /dev/null +++ b/lm_eval/filters/transformation.py @@ -0,0 +1,47 @@ +from lm_eval.api.filter import Filter + + +class LowercaseFilter(Filter): + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + return [resp.lower() for resp in inst] + + return [filter_set(resp) for resp in resps] + + +class UppercaseFilter(Filter): + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + return [resp.upper() for resp in inst] + + return [filter_set(resp) for resp in resps] + +class MapFilter(Filter): + def __init__(self, mapping_dict: dict = {}, default_value = None) -> None: + """ + Initializes the MapFilter with a given mapping dictionary and default value. + + Args: + - mapping_dict (dict): A dictionary containing the key-value mappings. + Default is an empty dictionary. + - default_value (Any): The value to be returned when a key is not found in the mapping_dict. + Default is None. + + Example: + mapper = MapFilter({'A': 1, 'B': 2}, default_value=0) + """ + assert isinstance(mapping_dict, dict), "Provided mapping_dict is not a dictionary" + self.mapping_dict = mapping_dict + self.default_value = default_value + + def apply(self, resps, docs): + def filter_set(inst): + return [self.mapping_dict.get(resp, self.default_value) for resp in inst] + + return [filter_set(resp) for resp in resps] -- GitLab From b4c72cb9808bc23bb65b3eb56764361f6075da9f Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 18:09:30 +0200 Subject: [PATCH 229/264] Update new_task_guide.md --- docs/new_task_guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/new_task_guide.md b/docs/new_task_guide.md index 54745f47..0641baae 100644 --- a/docs/new_task_guide.md +++ b/docs/new_task_guide.md @@ -214,7 +214,7 @@ metric_list: ``` `aggregation` and `higher_is_better` can optionally be left out to default to the manually-set defaults if using a natively supported metric, otherwise it must be defined explicitly (for example, when using a custom metric implemented as a function). -For a full list of natively supported metrics and aggregation functions see `docs/advanced_task_guide.md`. All metrics supported in [HuggingFace Evaluate](https://github.com/huggingface/evaluate/tree/main/metrics) can also be used, and will be loaded if a given metric name is not one natively supported in `lm-eval`. +For a full list of natively supported metrics and aggregation functions see `docs/advanced_task_guide.md`. All metrics supported in [HuggingFace Evaluate](https://github.com/huggingface/evaluate/tree/main/metrics) can also be used, and will be loaded if a given metric name is not one natively supported in `lm-eval`, or if `hf_evaluate` is set to `true`.
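To make the lookup order just described concrete, here is a minimal sketch of how `get_metric` behaves after these changes; the metric names are illustrative assumptions rather than guarantees about any particular install:

# Sketch only: metric-name resolution with the new hf_evaluate switch.
from lm_eval.api.registry import get_metric

# Assuming "acc" is registered in lm-eval's own METRIC_REGISTRY, it
# resolves locally and no HF Evaluate lookup is attempted.
acc_fn = get_metric("acc")

# Passing hf_evaluate_metric=True skips the local registry and hands the
# name straight to evaluate.load(), returning that metric's compute
# function; "ter" is an assumed example of an Evaluate-only metric.
ter_fn = get_metric("ter", hf_evaluate_metric=True)

In a task yaml the same switch is spelled `hf_evaluate: true` on a `metric_list` entry; as the `ConfigurableTask` diff above shows, that key is stripped from the metric kwargs before `get_metric` is called.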
### Optional, More Advanced Setup -- GitLab From 34d1ac6bc35ca94808bac938d33e96b8577ac2dc Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 18:37:38 +0200 Subject: [PATCH 230/264] Use 'is' for boolean comparison --- lm_eval/api/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index d1aa8c58..95beb803 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -557,7 +557,7 @@ class ConfigurableTask(Task): for key in metric_config if key not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"] } - hf_evaluate_metric = "hf_evaluate" in metric_config and metric_config["hf_evaluate"] == True + hf_evaluate_metric = "hf_evaluate" in metric_config and metric_config["hf_evaluate"] is True if self.config.process_results is not None: self._metric_fn_list[metric_name] = None -- GitLab From ad4ab52aa4f0952fb4958518d3942b32eb914d28 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 18:43:37 +0200 Subject: [PATCH 231/264] Fix formatting --- lm_eval/filters/transformation.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lm_eval/filters/transformation.py b/lm_eval/filters/transformation.py index 16e31a7c..f1239638 100644 --- a/lm_eval/filters/transformation.py +++ b/lm_eval/filters/transformation.py @@ -11,7 +11,7 @@ class LowercaseFilter(Filter): return [filter_set(resp) for resp in resps] - + class UppercaseFilter(Filter): def __init__(self) -> None: pass @@ -21,9 +21,10 @@ class UppercaseFilter(Filter): return [resp.upper() for resp in inst] return [filter_set(resp) for resp in resps] - + + class MapFilter(Filter): - def __init__(self, mapping_dict: dict = {}, default_value = None) -> None: + def __init__(self, mapping_dict: dict = {}, default_value=None) -> None: """ Initializes the MapFilter with a given mapping dictionary and default value. -- GitLab From 8310e67e7de114ef2c41e249cdebda9f2779c1cb Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 22 Sep 2023 18:59:44 +0200 Subject: [PATCH 232/264] Fix formatting --- lm_eval/api/registry.py | 2 +- lm_eval/api/task.py | 12 +++++++++--- lm_eval/filters/transformation.py | 4 +++- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/lm_eval/api/registry.py b/lm_eval/api/registry.py index e5da4b44..53e5771a 100644 --- a/lm_eval/api/registry.py +++ b/lm_eval/api/registry.py @@ -126,7 +126,7 @@ def get_metric(name, hf_evaluate_metric=False): eval_logger.warning( f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..." 
) - + try: metric_object = evaluate.load(name) return metric_object.compute diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 95beb803..9c46214b 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -555,9 +555,13 @@ class ConfigurableTask(Task): kwargs = { key: metric_config[key] for key in metric_config - if key not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"] + if key + not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"] } - hf_evaluate_metric = "hf_evaluate" in metric_config and metric_config["hf_evaluate"] is True + hf_evaluate_metric = ( + "hf_evaluate" in metric_config + and metric_config["hf_evaluate"] is True + ) if self.config.process_results is not None: self._metric_fn_list[metric_name] = None @@ -568,7 +572,9 @@ class ConfigurableTask(Task): self._metric_fn_list[metric_name] = metric_fn self._metric_fn_kwargs[metric_name] = kwargs else: - self._metric_fn_list[metric_name] = get_metric(metric_name, hf_evaluate_metric) + self._metric_fn_list[metric_name] = get_metric( + metric_name, hf_evaluate_metric + ) self._metric_fn_kwargs[metric_name] = kwargs if "aggregation" in metric_config: diff --git a/lm_eval/filters/transformation.py b/lm_eval/filters/transformation.py index f1239638..f254b0db 100644 --- a/lm_eval/filters/transformation.py +++ b/lm_eval/filters/transformation.py @@ -37,7 +37,9 @@ class MapFilter(Filter): Example: mapper = MapFilter({'A': 1, 'B': 2}, default_value=0) """ - assert isinstance(mapping_dict, dict), "Provided mapping_dict is not a dictionary" + assert isinstance( + mapping_dict, dict + ), "Provided mapping_dict is not a dictionary" self.mapping_dict = mapping_dict self.default_value = default_value -- GitLab From 0dc556dcf6285fbb3f49fb726bf3209431d93d05 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 25 Sep 2023 07:02:36 +0000 Subject: [PATCH 233/264] add function to add both task and benchmark --- lm_eval/tasks/__init__.py | 72 +++++++++++-------- .../flan/prompt_templates/flan_anli.yaml | 29 -------- .../flan/prompt_templates/flan_arc.yaml | 23 ------ .../flan/prompt_templates/flan_boolq.yaml | 33 --------- .../flan/prompt_templates/flan_rte.yaml | 29 -------- main.py | 4 +- 6 files changed, 46 insertions(+), 144 deletions(-) delete mode 100644 lm_eval/tasks/benchmarks/flan/prompt_templates/flan_anli.yaml delete mode 100644 lm_eval/tasks/benchmarks/flan/prompt_templates/flan_arc.yaml delete mode 100644 lm_eval/tasks/benchmarks/flan/prompt_templates/flan_boolq.yaml delete mode 100644 lm_eval/tasks/benchmarks/flan/prompt_templates/flan_rte.yaml diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index c6a78e9c..b935d106 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -45,6 +45,18 @@ def register_configurable_group(config: Dict[str, str]) -> int: task_list = [task for task in all_task_list if type(task) == str] for task_config in config_list: + # if "task" in task_config: + # task = task_config["task"] + # if task in GROUP_REGISTRY: + # task_list = GROUP_REGISTRY[task] + # elif task in TASK_REGISTRY: + # task_list = [TASK_REGISTRY[task]] + + # for _task in task_list: + # task_config = { + # **_task["CONFIG"], + # **task_config + # } var_configs = check_prompt_config( { **task_config, @@ -109,36 +121,40 @@ def include_task_folder(task_dir: str, register_task=True) -> None: Calling this function """ for root, subdirs, file_list in os.walk(task_dir): - if (subdirs == [] or subdirs == ["__pycache__"]) and (len(file_list) > 0): - for f in file_list: - if 
f.endswith(".yaml"): - yaml_path = os.path.join(root, f) - try: - config = utils.load_yaml_config(yaml_path) - - if register_task: - all_configs = check_prompt_config(config) - for config in all_configs: - register_configurable_task(config) - else: - # If a `task` in config is a list, - # that means it's a benchmark - if type(config["task"]) == list: - register_configurable_group(config) - - except Exception as error: - eval_logger.warning( - "Failed to load config in\n" - f" {yaml_path}\n" - " Config will not be added to registry\n" - f" Error: {error}" - ) - + # if (subdirs == [] or subdirs == ["__pycache__"]) and (len(file_list) > 0): + for f in file_list: + if f.endswith(".yaml"): + yaml_path = os.path.join(root, f) + try: + config = utils.load_yaml_config(yaml_path) + + if register_task: + all_configs = check_prompt_config(config) + for config in all_configs: + register_configurable_task(config) + else: + # If a `task` in config is a list, + # that means it's a benchmark + if type(config["task"]) == list: + register_configurable_group(config) + + except Exception as error: + eval_logger.warning( + "Failed to load config in\n" + f" {yaml_path}\n" + " Config will not be added to registry\n" + f" Error: {error}" + ) + + +def include_path(task_dir): + include_task_folder(task_dir) + # Register Benchmarks after all tasks have been added + include_task_folder(task_dir, register_task=False) + return 0 task_dir = os.path.dirname(os.path.abspath(__file__)) + "/" -include_task_folder(task_dir) -# Register Benchmarks after all tasks have been added -include_task_folder(task_dir, register_task=False) +include_path(task_dir) def get_task(task_name, config): diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_anli.yaml deleted file mode 100644 index 6ff78840..00000000 --- a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_anli.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Flan Prompt Templates -prompts: - "template-0": - doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-1": - doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-2": - doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-3": - doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-4": - doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-5": - doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-6": - 
doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-7": - doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" - "template-8": - doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" - doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_arc.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_arc.yaml deleted file mode 100644 index 4ee34e65..00000000 --- a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_arc.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Flan Prompt Templates -prompts: - "template-0": - doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" - "template-1": - doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" - "template-2": - doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" - "template-3": - doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" - "template-4": - doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" - "template-5": - doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" - "template-6": - doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" - doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_boolq.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_boolq.yaml deleted file mode 100644 index f8c8ebfc..00000000 --- a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_boolq.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Flan Prompt Templates -prompts: - "template-0": - doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-1": - doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-2": - doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-3": - doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-4": - doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 
'yes'][label]}}" - "template-5": - doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-6": - doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-7": - doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-8": - # doc_to_text: "{{title}}\n\n{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" - doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" - "template-9": - doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes" - doc_to_target: "{{['no', 'yes'][label]}}" diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_rte.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_rte.yaml deleted file mode 100644 index 7893eae4..00000000 --- a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_rte.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Flan Prompt Templates -prompts: - "template-0": - doc_to_text: "{{premise}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-1": - doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-2": - doc_to_text: "{{premise}}\n\nQ with options: Can we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-3": - doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-4": - doc_to_text: "{{premise}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{hypothesis}}" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-5": - doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nThe answer is" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-6": - doc_to_text: "Read the text and determine if the sentence is true:\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-7": - doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- yes\n- no\nA:" - doc_to_target: "{{['yes', 'no'][label]}}" - "template-8": - doc_to_text: "Determine if the sentence is true based on the text below. 
Choose from options.\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- yes\n- no" - doc_to_target: "{{['yes', 'no'][label]}}" diff --git a/main.py b/main.py index 96207884..1e6302b7 100644 --- a/main.py +++ b/main.py @@ -10,7 +10,7 @@ from pathlib import Path from lm_eval import evaluator, utils from lm_eval.api.registry import ALL_TASKS from lm_eval.logger import eval_logger, SPACING -from lm_eval.tasks import include_task_folder +from lm_eval.tasks import include_path os.environ["TOKENIZERS_PARALLELISM"] = "false" @@ -111,7 +111,7 @@ def main() -> None: if args.include_path is not None: eval_logger.info(f"Including path: {args.include_path}") - include_task_folder(args.include_path) + include_path(args.include_path) if args.tasks is None: task_names = ALL_TASKS -- GitLab From 694af7d656989d6a22623e5245db655569b40390 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Mon, 25 Sep 2023 14:00:14 +0000 Subject: [PATCH 234/264] remove temperature as do_sample is False --- lm_eval/api/task.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py index 95c08af4..64c0d7e8 100644 --- a/lm_eval/api/task.py +++ b/lm_eval/api/task.py @@ -117,7 +117,6 @@ class TaskConfig(dict): if self.fewshot_delimiter is None else [self.fewshot_delimiter], "do_sample": False, - "temperature": 0.0, } # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor? -- GitLab From 3f090027112a6e9ea45d7ee2deb57edac5f03ea2 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 26 Sep 2023 03:44:17 +0000 Subject: [PATCH 235/264] moved files --- lm_eval/prompts/__init__.py | 7 +++++-- lm_eval/tasks/__init__.py | 11 +++++++++-- lm_eval/tasks/benchmarks/{ => flan}/flan_anli.yaml | 6 +++--- lm_eval/tasks/benchmarks/flan/flan_arc.yaml | 14 ++++++++++++++ .../tasks/benchmarks/{ => flan}/flan_boolq.yaml | 2 +- lm_eval/tasks/benchmarks/{ => flan}/flan_cot.yaml | 0 lm_eval/tasks/benchmarks/flan/flan_held_in.yaml | 6 ++++++ .../{flan_held_in.yaml => flan/flan_held_in_yaml} | 14 +++++++------- .../tasks/benchmarks/{ => flan}/flan_held_out.yaml | 0 lm_eval/tasks/benchmarks/{ => flan}/flan_rte.yaml | 0 .../prompt_templates/{flan_anli.yaml => anli.yaml} | 0 .../prompt_templates/{flan_arc.yaml => arc.yaml} | 0 .../{flan_boolq.yaml => boolq.yaml} | 0 .../prompt_templates/{flan_rte.yaml => rte.yaml} | 0 lm_eval/utils.py | 4 +++- 15 files changed, 48 insertions(+), 16 deletions(-) rename lm_eval/tasks/benchmarks/{ => flan}/flan_anli.yaml (70%) create mode 100644 lm_eval/tasks/benchmarks/flan/flan_arc.yaml rename lm_eval/tasks/benchmarks/{ => flan}/flan_boolq.yaml (74%) rename lm_eval/tasks/benchmarks/{ => flan}/flan_cot.yaml (100%) create mode 100644 lm_eval/tasks/benchmarks/flan/flan_held_in.yaml rename lm_eval/tasks/benchmarks/{flan_held_in.yaml => flan/flan_held_in_yaml} (72%) rename lm_eval/tasks/benchmarks/{ => flan}/flan_held_out.yaml (100%) rename lm_eval/tasks/benchmarks/{ => flan}/flan_rte.yaml (100%) rename lm_eval/tasks/benchmarks/flan/prompt_templates/{flan_anli.yaml => anli.yaml} (100%) rename lm_eval/tasks/benchmarks/flan/prompt_templates/{flan_arc.yaml => arc.yaml} (100%) rename lm_eval/tasks/benchmarks/flan/prompt_templates/{flan_boolq.yaml => boolq.yaml} (100%) rename lm_eval/tasks/benchmarks/flan/prompt_templates/{flan_rte.yaml => rte.yaml} (100%) diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index 9662cd8e..545f3331 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -65,7 +65,7 @@ def
get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None ) -def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwargs): +def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, file_dir=None, **kwargs): category_name, prompt_name = use_prompt.split(":") @@ -84,6 +84,9 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwa elif ".yaml" in category_name: import yaml + if file_dir is not None: + category_name = os.path.realpath(os.path.join(file_dir, category_name)) + with open(category_name, "rb") as file: prompt_yaml_file = yaml.full_load(file) @@ -98,7 +101,7 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, **kwa # for prompt in prompt_name: # prompt_list.append(utils.pattern_match(prompt_name, prompts.all_template_names)) # else: - prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) + # prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) return [":".join([category_name, prompt]) for prompt in prompt_list] diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index b935d106..3de56fff 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -38,7 +38,7 @@ def register_configurable_task(config: Dict[str, str]) -> int: return 0 -def register_configurable_group(config: Dict[str, str]) -> int: +def register_configurable_group(config: Dict[str, str], yaml_path: str = None) -> int: group = config["group"] all_task_list = config["task"] config_list = [task for task in all_task_list if type(task) != str] @@ -57,6 +57,7 @@ def register_configurable_group(config: Dict[str, str]) -> int: # **_task["CONFIG"], # **task_config # } + task_config = utils.load_yaml_config(yaml_path, task_config) var_configs = check_prompt_config( { **task_config, @@ -128,6 +129,10 @@ def include_task_folder(task_dir: str, register_task=True) -> None: try: config = utils.load_yaml_config(yaml_path) + # if ("prompts" in config) and (len(config.keys()) == 1): + + # continue + if register_task: all_configs = check_prompt_config(config) for config in all_configs: @@ -136,9 +141,11 @@ def include_task_folder(task_dir: str, register_task=True) -> None: # If a `task` in config is a list, # that means it's a benchmark if type(config["task"]) == list: - register_configurable_group(config) + register_configurable_group(config, yaml_path) except Exception as error: + import traceback + print(traceback.format_exc()) eval_logger.warning( "Failed to load config in\n" f" {yaml_path}\n" diff --git a/lm_eval/tasks/benchmarks/flan_anli.yaml b/lm_eval/tasks/benchmarks/flan/flan_anli.yaml similarity index 70% rename from lm_eval/tasks/benchmarks/flan_anli.yaml rename to lm_eval/tasks/benchmarks/flan/flan_anli.yaml index d6201bb7..ae245c85 100644 --- a/lm_eval/tasks/benchmarks/flan_anli.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_anli.yaml @@ -3,15 +3,15 @@ task: - include: flan/yaml_templates/held_in_template_yaml task: anli_r1 dataset_path: anli - use_prompt: flan/prompt_templates/flan_anli.yaml:* + use_prompt: flan/prompt_templates/anli.yaml:* validation_split: dev_r1 - include: flan/yaml_templates/held_in_template_yaml task: anli_r2 dataset_path: anli - use_prompt: flan/prompt_templates/flan_anli.yaml:* + use_prompt: flan/prompt_templates/anli.yaml:* validation_split: dev_r2 - include: flan/yaml_templates/held_in_template_yaml task: anli_r3 dataset_path: anli - use_prompt: flan/prompt_templates/flan_anli.yaml:* + use_prompt: 
flan/prompt_templates/anli.yaml:* validation_split: dev_r3 diff --git a/lm_eval/tasks/benchmarks/flan/flan_arc.yaml b/lm_eval/tasks/benchmarks/flan/flan_arc.yaml new file mode 100644 index 00000000..0e1efe90 --- /dev/null +++ b/lm_eval/tasks/benchmarks/flan/flan_arc.yaml @@ -0,0 +1,14 @@ +group: flan_arc +task: + - include: flan/yaml_templates/held_in_template_yaml + task: arc_easy + dataset_path: ai2_arc + dataset_name: ARC-Easy + use_prompt: flan/prompt_templates/arc.yaml:* + validation_split: validation + - include: flan/yaml_templates/held_in_template_yaml + task: arc_challenge + dataset_path: ai2_arc + dataset_name: ARC-Challenge + use_prompt: flan/prompt_templates/arc.yaml:* + validation_split: validation diff --git a/lm_eval/tasks/benchmarks/flan_boolq.yaml b/lm_eval/tasks/benchmarks/flan/flan_boolq.yaml similarity index 74% rename from lm_eval/tasks/benchmarks/flan_boolq.yaml rename to lm_eval/tasks/benchmarks/flan/flan_boolq.yaml index f7ca4796..7ba060e7 100644 --- a/lm_eval/tasks/benchmarks/flan_boolq.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_boolq.yaml @@ -3,5 +3,5 @@ task: - include: flan/yaml_templates/held_in_template_yaml dataset_path: super_glue dataset_name: boolq - use_prompt: flan/prompt_templates/flan_boolq.yaml:* + use_prompt: flan/prompt_templates/boolq.yaml:* validation_split: validation diff --git a/lm_eval/tasks/benchmarks/flan_cot.yaml b/lm_eval/tasks/benchmarks/flan/flan_cot.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan_cot.yaml rename to lm_eval/tasks/benchmarks/flan/flan_cot.yaml diff --git a/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml b/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml new file mode 100644 index 00000000..5465b58c --- /dev/null +++ b/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml @@ -0,0 +1,6 @@ +group: flan_held_in +task: + - flan_boolq + - flan_rte + - flan_anli + - flan_arc diff --git a/lm_eval/tasks/benchmarks/flan_held_in.yaml b/lm_eval/tasks/benchmarks/flan/flan_held_in_yaml similarity index 72% rename from lm_eval/tasks/benchmarks/flan_held_in.yaml rename to lm_eval/tasks/benchmarks/flan/flan_held_in_yaml index f1965c9d..a31a942e 100644 --- a/lm_eval/tasks/benchmarks/flan_held_in.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_held_in_yaml @@ -3,37 +3,37 @@ task: - include: flan/yaml_templates/held_in_template_yaml dataset_path: super_glue dataset_name: boolq - use_prompt: flan/prompt_templates/flan_boolq.yaml:* + use_prompt: flan/prompt_templates/boolq.yaml:* validation_split: validation - include: flan/yaml_templates/held_in_template_yaml dataset_path: super_glue dataset_name: rte - use_prompt: flan/prompt_templates/flan_rte.yaml:* + use_prompt: flan/prompt_templates/rte.yaml:* validation_split: validation - include: flan/yaml_templates/held_in_template_yaml task: anli_r1 dataset_path: anli - use_prompt: flan/prompt_templates/flan_anli.yaml:* + use_prompt: flan/prompt_templates/anli.yaml:* validation_split: dev_r1 - include: flan/yaml_templates/held_in_template_yaml task: anli_r2 dataset_path: anli - use_prompt: flan/prompt_templates/flan_anli.yaml:* + use_prompt: flan/prompt_templates/anli.yaml:* validation_split: dev_r2 - include: flan/yaml_templates/held_in_template_yaml task: anli_r3 dataset_path: anli - use_prompt: flan/prompt_templates/flan_anli.yaml:* + use_prompt: flan/prompt_templates/anli.yaml:* validation_split: dev_r3 - include: flan/yaml_templates/held_in_template_yaml task: arc_easy dataset_path: ai2_arc dataset_name: ARC-Easy - use_prompt: flan/prompt_templates/flan_arc.yaml:* + use_prompt: 
flan/prompt_templates/arc.yaml:* validation_split: validation - include: flan/yaml_templates/held_in_template_yaml task: arc_challenge dataset_path: ai2_arc dataset_name: ARC-Challenge - use_prompt: flan/prompt_templates/flan_arc.yaml:* + use_prompt: flan/prompt_templates/arc.yaml:* validation_split: validation diff --git a/lm_eval/tasks/benchmarks/flan_held_out.yaml b/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan_held_out.yaml rename to lm_eval/tasks/benchmarks/flan/flan_held_out.yaml diff --git a/lm_eval/tasks/benchmarks/flan_rte.yaml b/lm_eval/tasks/benchmarks/flan/flan_rte.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan_rte.yaml rename to lm_eval/tasks/benchmarks/flan/flan_rte.yaml diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_anli.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/anli.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan/prompt_templates/flan_anli.yaml rename to lm_eval/tasks/benchmarks/flan/prompt_templates/anli.yaml diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_arc.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/arc.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan/prompt_templates/flan_arc.yaml rename to lm_eval/tasks/benchmarks/flan/prompt_templates/arc.yaml diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_boolq.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/boolq.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan/prompt_templates/flan_boolq.yaml rename to lm_eval/tasks/benchmarks/flan/prompt_templates/boolq.yaml diff --git a/lm_eval/tasks/benchmarks/flan/prompt_templates/flan_rte.yaml b/lm_eval/tasks/benchmarks/flan/prompt_templates/rte.yaml similarity index 100% rename from lm_eval/tasks/benchmarks/flan/prompt_templates/flan_rte.yaml rename to lm_eval/tasks/benchmarks/flan/prompt_templates/rte.yaml diff --git a/lm_eval/utils.py b/lm_eval/utils.py index 7014a81e..150aa55c 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -426,7 +426,9 @@ def load_yaml_config(yaml_path=None, yaml_config=None, yaml_dir=None): if yaml_config is None: with open(yaml_path, "rb") as file: yaml_config = yaml.full_load(file) - yaml_dir = os.path.dirname(yaml_path) + + if yaml_dir is None: + yaml_dir = os.path.dirname(yaml_path) assert yaml_dir is not None -- GitLab From 307118731f93443f14f490d058db7945492599b3 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 26 Sep 2023 15:02:22 +0000 Subject: [PATCH 236/264] modify to conform to pre-commit --- lm_eval/tasks/code_x_glue/code-text/bleu.py | 39 +++++++++++++-------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/lm_eval/tasks/code_x_glue/code-text/bleu.py b/lm_eval/tasks/code_x_glue/code-text/bleu.py index aff16afe..310c626c 100644 --- a/lm_eval/tasks/code_x_glue/code-text/bleu.py +++ b/lm_eval/tasks/code_x_glue/code-text/bleu.py @@ -1,4 +1,12 @@ #!/usr/bin/python +import os +import re +import sys +import math +import subprocess +import xml.sax.saxutils + +from typing import List, Pattern, Tuple, Union, Dict, Any, Optional """ This script was adapted from the original version by hieuhoang1972 which is part of MOSES. 
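The module docstring continues below; as orientation for the refactor in this patch, the three-phase interface it describes can be driven roughly as in the following minimal sketch. The `bleu` import alias and the example strings are assumptions, and `cook_refs` is taken to return the `(reflens, refmaxcounts)` pair that `cook_test` unpacks; this sketch is not part of the patch itself.

import bleu  # hypothetical alias for lm_eval/tasks/code_x_glue/code-text/bleu.py

# One list of reference translations per segment, plus system hypotheses.
refs = [["the cat sat on the mat"], ["there is a cat on the mat"]]
hyps = ["the cat is on the mat", "a cat sits on the mat"]

# Phase 1: pre-process each segment's references once (model-independent),
# so the cooked statistics can be reused across multiple test sets.
cooked_refs = [bleu.cook_refs(r) for r in refs]

# Phase 2: pair every hypothesis with its segment's cooked references.
comps = [bleu.cook_test(h, item) for h, item in zip(hyps, cooked_refs)]

# Phase 3: aggregate the per-segment n-gram statistics into a corpus-level score.
score = bleu.score_cooked(comps)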
@@ -17,17 +25,13 @@ score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU sco The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. """ -import sys, math, re, xml.sax.saxutils -import subprocess -import os - # Added to bypass NIST-style pre-processing of hyp and ref files -- wade nonorm = 0 preserve_case = False eff_ref_len = "shortest" -normalize1 = [ +normalize1: List[Tuple[Union[Pattern[str], str], str]] = [ ("", ""), # strip "skipped" tags (r"-\n", ""), # strip end-of-line hyphenation and join lines (r"\n", " "), # join lines @@ -35,7 +39,7 @@ normalize1 = [ ] normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] -normalize2 = [ +normalize2: List[Tuple[Union[Pattern[str], str], str]] = [ ( r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", r" \1 ", @@ -74,7 +78,7 @@ def normalize(s): def count_ngrams(words, n=4): - counts = {} + counts: Dict[Any, int] = {} for k in range(1, n + 1): for i in range(len(words) - k + 1): ngram = tuple(words[i : i + k]) @@ -88,7 +92,7 @@ def cook_refs(refs, n=4): needs to know about them.""" refs = [normalize(ref) for ref in refs] - maxcounts = {} + maxcounts: Dict[Tuple[str], int] = {} for ref in refs: counts = count_ngrams(ref, n) for (ngram, count) in counts.items(): @@ -101,7 +105,7 @@ def cook_test(test, item, n=4): encapsulates everything that BLEU needs to know about it.""" (reflens, refmaxcounts) = item test = normalize(test) - result = {} + result: Dict[str, Any] = {} result["testlen"] = len(test) # Calculate effective reference sentence length. @@ -111,7 +115,7 @@ def cook_test(test, item, n=4): elif eff_ref_len == "average": result["reflen"] = float(sum(reflens)) / len(reflens) elif eff_ref_len == "closest": - min_diff = None + min_diff: Optional[int] = None for reflen in reflens: if min_diff is None or abs(reflen - len(test)) < min_diff: min_diff = abs(reflen - len(test)) @@ -128,7 +132,12 @@ def cook_test(test, item, n=4): def score_cooked(allcomps, n=4, ground=0, smooth=1): - totalcomps = {"testlen": 0, "reflen": 0, "guess": [0] * n, "correct": [0] * n} + totalcomps: Dict[str, Any] = { + "testlen": 0, + "reflen": 0, + "guess": [0] * n, + "correct": [0] * n, + } for comps in allcomps: for key in ["testlen", "reflen"]: totalcomps[key] += comps[key] @@ -136,7 +145,7 @@ def score_cooked(allcomps, n=4, ground=0, smooth=1): for k in range(n): totalcomps[key][k] += comps[key][k] logbleu = 0.0 - all_bleus = [] + all_bleus: List[float] = [] for k in range(n): correct = totalcomps["correct"][k] guess = totalcomps["guess"][k] @@ -147,7 +156,7 @@ def score_cooked(allcomps, n=4, ground=0, smooth=1): guess + addsmooth + sys.float_info.min ) if guess == 0: - all_bleus.append(-10000000) + all_bleus.append(-10000000.0) else: all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess)) @@ -175,8 +184,8 @@ def splitPuncts(line): def computeMaps(predictions, goldfile): - predictionMap = {} - goldMap = {} + predictionMap: Dict[str, list] = {} + goldMap: Dict[str, list] = {} gf = open(goldfile, "r") for row in predictions: -- GitLab From b2d16321724b572967bc7f1cbe64ae0de4eaa0dd Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 26 Sep 2023 15:06:35 +0000 Subject: [PATCH 237/264] update loading prompts --- lm_eval/prompts/__init__.py | 11 +-- lm_eval/tasks/__init__.py | 71 +++++++++++-------- lm_eval/tasks/bbh/_generate_configs.py | 2 +- 
lm_eval/tasks/benchmarks/flan/flan_anli.yaml | 12 ++-- lm_eval/tasks/benchmarks/flan/flan_arc.yaml | 8 +-- lm_eval/tasks/benchmarks/flan/flan_boolq.yaml | 4 +- lm_eval/tasks/benchmarks/flan/flan_cot.yaml | 4 +- lm_eval/tasks/benchmarks/flan/flan_rte.yaml | 4 +- .../paws-x/{utils.py => _generate_config.py} | 0 lm_eval/utils.py | 2 +- 10 files changed, 66 insertions(+), 52 deletions(-) rename lm_eval/tasks/paws-x/{utils.py => _generate_config.py} (100%) diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py index 545f3331..68eeac6c 100644 --- a/lm_eval/prompts/__init__.py +++ b/lm_eval/prompts/__init__.py @@ -1,3 +1,4 @@ +import os import ast from typing import Dict @@ -65,7 +66,9 @@ def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None ) -def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, file_dir=None, **kwargs): +def load_prompt_list( + use_prompt: str, dataset_name=None, subset_name=None, yaml_path=None, **kwargs +): category_name, prompt_name = use_prompt.split(":") @@ -84,8 +87,8 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, file_ elif ".yaml" in category_name: import yaml - if file_dir is not None: - category_name = os.path.realpath(os.path.join(file_dir, category_name)) + if yaml_path is not None: + category_name = os.path.realpath(os.path.join(yaml_path, category_name)) with open(category_name, "rb") as file: prompt_yaml_file = yaml.full_load(file) @@ -94,7 +97,7 @@ def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, file_ prompt_name, prompt_yaml_file["prompts"].keys() ) - category_name, *prompt_name = use_prompt.split(":") + # category_name, *prompt_name = use_prompt.split(":") # TODO allow to multiple prompt naming # if len(prompt_name) > 1: # prompt_list = [] diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 3de56fff..f0923f09 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -45,24 +45,25 @@ def register_configurable_group(config: Dict[str, str], yaml_path: str = None) - task_list = [task for task in all_task_list if type(task) == str] for task_config in config_list: - # if "task" in task_config: - # task = task_config["task"] - # if task in GROUP_REGISTRY: - # task_list = GROUP_REGISTRY[task] - # elif task in TASK_REGISTRY: - # task_list = [TASK_REGISTRY[task]] - - # for _task in task_list: - # task_config = { - # **_task["CONFIG"], - # **task_config - # } + # assert "task" in task_config: + # task = task_config["task"] + # if task in GROUP_REGISTRY: + # task_list = GROUP_REGISTRY[task] + # elif task in TASK_REGISTRY: + # task_list = [TASK_REGISTRY[task]] + + # for _task in task_list: + # task_config = { + # **_task["CONFIG"], + # **task_config + # } task_config = utils.load_yaml_config(yaml_path, task_config) var_configs = check_prompt_config( { **task_config, **{"group": group}, - } + }, + yaml_path=os.path.dirname(yaml_path), ) for config in var_configs: register_configurable_task(config) @@ -79,13 +80,16 @@ def register_configurable_group(config: Dict[str, str], yaml_path: str = None) - return 0 -def check_prompt_config(config: Dict[str, str]) -> List[Dict[str, str]]: +def check_prompt_config( + config: Dict[str, str], yaml_path: str = None +) -> List[Dict[str, str]]: all_configs = [] if "use_prompt" in config: prompt_list = prompts.load_prompt_list( use_prompt=config["use_prompt"], dataset_name=config["dataset_path"], subset_name=config["dataset_name"] if "dataset_name" in config else None, + 
yaml_path=yaml_path, ) for idx, prompt_variation in enumerate(prompt_list): all_configs.append( @@ -98,7 +102,9 @@ def check_prompt_config(config: Dict[str, str]) -> List[Dict[str, str]]: config["task"] if "task" in config else get_task_name_from_config(config), - prompt_variation, + prompt_variation.split("/")[-1] + if ".yaml" in prompt_variation + else prompt_variation, ] ) }, @@ -117,7 +123,7 @@ def get_task_name_from_config(task_config: Dict[str, str]) -> str: return "{dataset_path}".format(**task_config) -def include_task_folder(task_dir: str, register_task=True) -> None: +def include_task_folder(task_dir: str, register_task: bool = True) -> None: """ Calling this function """ @@ -129,29 +135,33 @@ def include_task_folder(task_dir: str, register_task=True) -> None: try: config = utils.load_yaml_config(yaml_path) - # if ("prompts" in config) and (len(config.keys()) == 1): - - # continue + if "task" not in config: + continue - if register_task: - all_configs = check_prompt_config(config) - for config in all_configs: - register_configurable_task(config) - else: - # If a `task` in config is a list, - # that means it's a benchmark - if type(config["task"]) == list: - register_configurable_group(config, yaml_path) + all_configs = check_prompt_config( + config, yaml_path=os.path.dirname(yaml_path) + ) + for config in all_configs: + if register_task: + if type(config["task"]) == str: + register_configurable_task(config) + else: + if type(config["task"]) == list: + register_configurable_group(config, yaml_path) except Exception as error: import traceback - print(traceback.format_exc()) + + print("###") + print(yaml_path) eval_logger.warning( "Failed to load config in\n" f" {yaml_path}\n" " Config will not be added to registry\n" - f" Error: {error}" + f" Error: {error}\n" + f" Traceback: {traceback.format_exc()}" ) + return 0 def include_path(task_dir): @@ -160,6 +170,7 @@ def include_path(task_dir): include_task_folder(task_dir, register_task=False) return 0 + task_dir = os.path.dirname(os.path.abspath(__file__)) + "/" include_path(task_dir) diff --git a/lm_eval/tasks/bbh/_generate_configs.py b/lm_eval/tasks/bbh/_generate_configs.py index 0c882af0..1f528722 100644 --- a/lm_eval/tasks/bbh/_generate_configs.py +++ b/lm_eval/tasks/bbh/_generate_configs.py @@ -54,7 +54,7 @@ if __name__ == "__main__": shot = "Q:" + shot try: answer = answer_regex.search(shot)[0] - except: + except Exception: print("task", task) print(shot) example = shot.split("Let's think step by step.")[0] diff --git a/lm_eval/tasks/benchmarks/flan/flan_anli.yaml b/lm_eval/tasks/benchmarks/flan/flan_anli.yaml index ae245c85..21278e1f 100644 --- a/lm_eval/tasks/benchmarks/flan/flan_anli.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_anli.yaml @@ -1,17 +1,17 @@ group: flan_anli task: - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml task: anli_r1 dataset_path: anli - use_prompt: flan/prompt_templates/anli.yaml:* + use_prompt: prompt_templates/anli.yaml:* validation_split: dev_r1 - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml task: anli_r2 dataset_path: anli - use_prompt: flan/prompt_templates/anli.yaml:* + use_prompt: prompt_templates/anli.yaml:* validation_split: dev_r2 - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml task: anli_r3 dataset_path: anli - use_prompt: flan/prompt_templates/anli.yaml:* + use_prompt: prompt_templates/anli.yaml:* validation_split: dev_r3 
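For reference, each `use_prompt: prompt_templates/<task>.yaml:*` entry in this group points at a template file that `load_prompt_list` now resolves relative to the directory of the including benchmark yaml (the `yaml_path` threaded through in this patch); the `*` wildcard is pattern-matched against the keys of the file's top-level `prompts:` mapping, yielding one task variant per key. A hypothetical sketch of such a template file follows — the prompt names and per-prompt fields are illustrative assumptions, not copied from the repository:

prompts:
  gpt3_style:
    doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:"
    doc_to_target: "{{['True', 'Neither', 'False'][label]}}"
  can_we_infer:
    doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\nYes, No, or Maybe?"
    doc_to_target: "{{['Yes', 'Maybe', 'No'][label]}}"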
diff --git a/lm_eval/tasks/benchmarks/flan/flan_arc.yaml b/lm_eval/tasks/benchmarks/flan/flan_arc.yaml index 0e1efe90..4e73b7ce 100644 --- a/lm_eval/tasks/benchmarks/flan/flan_arc.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_arc.yaml @@ -1,14 +1,14 @@ group: flan_arc task: - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml task: arc_easy dataset_path: ai2_arc dataset_name: ARC-Easy - use_prompt: flan/prompt_templates/arc.yaml:* + use_prompt: prompt_templates/arc.yaml:* validation_split: validation - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml task: arc_challenge dataset_path: ai2_arc dataset_name: ARC-Challenge - use_prompt: flan/prompt_templates/arc.yaml:* + use_prompt: prompt_templates/arc.yaml:* validation_split: validation diff --git a/lm_eval/tasks/benchmarks/flan/flan_boolq.yaml b/lm_eval/tasks/benchmarks/flan/flan_boolq.yaml index 7ba060e7..8fe36cd5 100644 --- a/lm_eval/tasks/benchmarks/flan/flan_boolq.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_boolq.yaml @@ -1,7 +1,7 @@ group: flan_boolq task: - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml dataset_path: super_glue dataset_name: boolq - use_prompt: flan/prompt_templates/boolq.yaml:* + use_prompt: prompt_templates/boolq.yaml:* validation_split: validation diff --git a/lm_eval/tasks/benchmarks/flan/flan_cot.yaml b/lm_eval/tasks/benchmarks/flan/flan_cot.yaml index ff6edc24..10102d24 100644 --- a/lm_eval/tasks/benchmarks/flan/flan_cot.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_cot.yaml @@ -1,11 +1,11 @@ group: flan_cot task: - - include: flan/yaml_templates/cot_template_yaml + - include: yaml_templates/cot_template_yaml dataset_path: gsmk dataset_name: boolq use_prompt: promptsource:* validation_split: validation - - include: flan/yaml_templates/cot_template_yaml + - include: yaml_templates/cot_template_yaml dataset_path: EleutherAI/asdiv use_prompt: promptsource:* validation_split: validation diff --git a/lm_eval/tasks/benchmarks/flan/flan_rte.yaml b/lm_eval/tasks/benchmarks/flan/flan_rte.yaml index cf5832bf..a4f40736 100644 --- a/lm_eval/tasks/benchmarks/flan/flan_rte.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_rte.yaml @@ -1,7 +1,7 @@ group: flan_rte task: - - include: flan/yaml_templates/held_in_template_yaml + - include: yaml_templates/held_in_template_yaml dataset_path: super_glue dataset_name: rte - use_prompt: flan/prompt_templates/flan_rte.yaml:* + use_prompt: prompt_templates/rte.yaml:* validation_split: validation diff --git a/lm_eval/tasks/paws-x/utils.py b/lm_eval/tasks/paws-x/_generate_config.py similarity index 100% rename from lm_eval/tasks/paws-x/utils.py rename to lm_eval/tasks/paws-x/_generate_config.py diff --git a/lm_eval/utils.py b/lm_eval/utils.py index 150aa55c..356fdf7b 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -426,7 +426,7 @@ def load_yaml_config(yaml_path=None, yaml_config=None, yaml_dir=None): if yaml_config is None: with open(yaml_path, "rb") as file: yaml_config = yaml.full_load(file) - + if yaml_dir is None: yaml_dir = os.path.dirname(yaml_path) -- GitLab From 6ae9e9e80c1e0049237d0179ac94fd57bc49029b Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Tue, 26 Sep 2023 15:36:48 +0000 Subject: [PATCH 238/264] remove comments --- lm_eval/tasks/__init__.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index f0923f09..64dd4fdb 
100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -45,18 +45,7 @@ def register_configurable_group(config: Dict[str, str], yaml_path: str = None) - task_list = [task for task in all_task_list if type(task) == str] for task_config in config_list: - # assert "task" in task_config: - # task = task_config["task"] - # if task in GROUP_REGISTRY: - # task_list = GROUP_REGISTRY[task] - # elif task in TASK_REGISTRY: - # task_list = [TASK_REGISTRY[task]] - - # for _task in task_list: - # task_config = { - # **_task["CONFIG"], - # **task_config - # } + task_config = utils.load_yaml_config(yaml_path, task_config) var_configs = check_prompt_config( { -- GitLab From 3d2ee4d43e25a069aec30ccbdff17861deed55e1 Mon Sep 17 00:00:00 2001 From: Hailey Schoelkopf <65563625+haileyschoelkopf@users.noreply.github.com> Date: Tue, 26 Sep 2023 12:10:04 -0400 Subject: [PATCH 239/264] Update _default_template_yaml --- lm_eval/tasks/mmlu/default/_default_template_yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lm_eval/tasks/mmlu/default/_default_template_yaml b/lm_eval/tasks/mmlu/default/_default_template_yaml index 1064a2d1..bd989c40 100644 --- a/lm_eval/tasks/mmlu/default/_default_template_yaml +++ b/lm_eval/tasks/mmlu/default/_default_template_yaml @@ -2,6 +2,8 @@ group: mmlu dataset_path: cais/mmlu test_split: test fewshot_split: dev +fewshot_config: + sampler: first_n output_type: multiple_choice doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" doc_to_choice: ["A", "B", "C", "D"] -- GitLab From 5c418e2e7d333b2c581dec88391e9d46312fb917 Mon Sep 17 00:00:00 2001 From: Tanishq Abraham <37097934+tmabraham@users.noreply.github.com> Date: Fri, 29 Sep 2023 03:31:20 -0700 Subject: [PATCH 240/264] Update pubmedqa.yaml --- lm_eval/tasks/pubmedqa/pubmedqa.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lm_eval/tasks/pubmedqa/pubmedqa.yaml b/lm_eval/tasks/pubmedqa/pubmedqa.yaml index ae5e0657..a182521e 100644 --- a/lm_eval/tasks/pubmedqa/pubmedqa.yaml +++ b/lm_eval/tasks/pubmedqa/pubmedqa.yaml @@ -1,10 +1,10 @@ task: pubmed_qa -dataset_path: pubmed_qa -dataset_name: pqa_labeled +dataset_path: bigbio/pubmed_qa +dataset_name: pubmed_qa_labeled_fold0_source output_type: multiple_choice -training_split: null -validation_split: null -test_split: train +training_split: train +validation_split: validation +test_split: test doc_to_text: !function preprocess_pubmedqa.doc_to_text doc_to_target: final_decision doc_to_choice: ["yes", "no", "maybe"] -- GitLab From 6658f510321c97f33b0fb08e5ddaf136d12bfff2 Mon Sep 17 00:00:00 2001 From: Tanishq Abraham <37097934+tmabraham@users.noreply.github.com> Date: Fri, 29 Sep 2023 03:31:33 -0700 Subject: [PATCH 241/264] Update preprocess_pubmedqa.py --- lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py b/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py index 095e58b2..516f0e2f 100644 --- a/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py +++ b/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py @@ -1,7 +1,7 @@ def doc_to_text(doc) -> str: - ctxs = "\n".join(doc["context"]["contexts"]) + ctxs = "\n".join(doc["CONTEXTS"]) return "Abstract: {}\nQuestion: {}\nAnswer:".format( - ctxs, doc["question"], doc["final_decision"] + ctxs, doc["QUESTION"], doc["final_decision"] ) -- GitLab From e634f83fe931108d080936ee2b17f878fa3f1ba6 Mon Sep 17 00:00:00 2001 From: Chris Date: Mon, 2 
Oct 2023 16:25:32 +0200 Subject: [PATCH 242/264] Add PolEmo2 tasks --- lm_eval/tasks/polemo2/README.md | 57 ++++++++++++++++++++++++++ lm_eval/tasks/polemo2/polemo2_in.yaml | 43 +++++++++++++++++++ lm_eval/tasks/polemo2/polemo2_out.yaml | 4 ++ 3 files changed, 104 insertions(+) create mode 100644 lm_eval/tasks/polemo2/README.md create mode 100644 lm_eval/tasks/polemo2/polemo2_in.yaml create mode 100644 lm_eval/tasks/polemo2/polemo2_out.yaml diff --git a/lm_eval/tasks/polemo2/README.md b/lm_eval/tasks/polemo2/README.md new file mode 100644 index 00000000..5d48ece2 --- /dev/null +++ b/lm_eval/tasks/polemo2/README.md @@ -0,0 +1,57 @@ +# PolEmo 2.0 + +### Paper + +Title: `Multi-Level Sentiment Analysis of PolEmo 2.0: Extended Corpus of Multi-Domain Consumer Reviews` + +Abstract: https://aclanthology.org/K19-1092/ + +The PolEmo 2.0 is a dataset of online consumer reviews in Polish from four domains: medicine, hotels, products, and university. It is human-annotated on a level of full reviews and individual sentences. It comprises over 8000 reviews, about 85% from the medicine and hotel domains. +The goal is to predict the sentiment of a review. There are two separate test sets, to allow for in-domain (medicine and hotels) as well as out-of-domain (products and university) validation. + +Homepage: https://clarin-pl.eu/dspace/handle/11321/710 + + +### Citation + +``` +@inproceedings{kocon-etal-2019-multi, + title = "Multi-Level Sentiment Analysis of {P}ol{E}mo 2.0: Extended Corpus of Multi-Domain Consumer Reviews", + author = "Koco{\'n}, Jan and + Mi{\l}kowski, Piotr and + Za{\'s}ko-Zieli{\'n}ska, Monika", + booktitle = "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", + month = nov, + year = "2019", + address = "Hong Kong, China", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/K19-1092", + doi = "10.18653/v1/K19-1092", + pages = "980--991", + abstract = "In this article we present an extended version of PolEmo {--} a corpus of consumer reviews from 4 domains: medicine, hotels, products and school. Current version (PolEmo 2.0) contains 8,216 reviews having 57,466 sentences. Each text and sentence was manually annotated with sentiment in 2+1 scheme, which gives a total of 197,046 annotations. We obtained a high value of Positive Specific Agreement, which is 0.91 for texts and 0.88 for sentences. PolEmo 2.0 is publicly available under a Creative Commons copyright license. We explored recent deep learning approaches for the recognition of sentiment, such as Bi-directional Long Short-Term Memory (BiLSTM) and Bidirectional Encoder Representations from Transformers (BERT).", +} +``` + +### Groups and Tasks + +#### Groups + +* `polemo2`: Evaluates `polemo2_in` and `polemo2_out` + +#### Tasks + +* `polemo2_in`: evaluates sentiment predictions of in-domain (medicine and hotels) reviews +* `polemo2_out`: evaluates sentiment predictions of out-of-domain (products and university) reviews + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? 
+* [x] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm_eval/tasks/polemo2/polemo2_in.yaml b/lm_eval/tasks/polemo2/polemo2_in.yaml new file mode 100644 index 00000000..4c2250f8 --- /dev/null +++ b/lm_eval/tasks/polemo2/polemo2_in.yaml @@ -0,0 +1,43 @@ +group: + - polemo2 +task: polemo2_in +dataset_path: allegro/klej-polemo2-in +dataset_name: klej-polemo2-in +output_type: greedy_until +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Opinia: \"{{sentence}}\"\nOkreśl sentyment podanej opinii. Możliwe odpowiedzi:\nA - Neutralny\nB - Negatywny\nC - Pozytywny\nD - Niejednoznaczny\nPrawidłowa odpowiedź:" +doc_to_target: "{{['__label__meta_zero', '__label__meta_minus_m', '__label__meta_plus_m', '__label__meta_amb'].index(target)}}" +should_decontaminate: true +doc_to_decontamination_query: "{{sentence}}" +generation_kwargs: + until: + - "." + - "," + do_sample: false + temperature: 0.0 + max_gen_toks: 50 +filter_list: + - name: "score-first" + filter: + - function: "regex" + regex_pattern: "(\\b[ABCD]\\b)" + - function: "take_first" + - function: "map" + mapping_dict: + A: 0 + B: 1 + C: 2 + D: 3 + default_value: -1 + - function: "take_first" +metric_list: + - metric: f1 + aggregation: mean + higher_is_better: true + hf_evaluate: true + average: micro + - metric: accuracy + aggregation: mean + higher_is_better: true diff --git a/lm_eval/tasks/polemo2/polemo2_out.yaml b/lm_eval/tasks/polemo2/polemo2_out.yaml new file mode 100644 index 00000000..a815a780 --- /dev/null +++ b/lm_eval/tasks/polemo2/polemo2_out.yaml @@ -0,0 +1,4 @@ +include: polemo2_in.yaml +task: polemo2_out +dataset_path: allegro/klej-polemo2-out +dataset_name: klej-polemo2-out -- GitLab From 06ce7a62dece2427ede1ea34b6aad77b7ac5cbd4 Mon Sep 17 00:00:00 2001 From: Chris Date: Mon, 2 Oct 2023 16:33:25 +0200 Subject: [PATCH 243/264] Fix formatting --- lm_eval/tasks/polemo2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/polemo2/README.md b/lm_eval/tasks/polemo2/README.md index 5d48ece2..837c704d 100644 --- a/lm_eval/tasks/polemo2/README.md +++ b/lm_eval/tasks/polemo2/README.md @@ -1,4 +1,4 @@ -# PolEmo 2.0 +# PolEmo 2.0 ### Paper -- GitLab From 3f2dd9fbbd9f035870f6b7b224e9daae1757818c Mon Sep 17 00:00:00 2001 From: Chris <34248815+chrisociepa@users.noreply.github.com> Date: Tue, 3 Oct 2023 09:21:24 +0200 Subject: [PATCH 244/264] Change python version to 3.8 in .pre-commit-config.yaml --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6c1317b6..8fa6aeaa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,7 +33,7 @@ repos: rev: 22.3.0 hooks: - id: black - language_version: python3.9 + language_version: python3.8 - repo: https://github.com/codespell-project/codespell rev: v2.1.0 hooks: -- GitLab From bf275d00843369c22269a7890495bb9bb0e9f7a7 Mon Sep 17 00:00:00 2001 From: Chris <34248815+chrisociepa@users.noreply.github.com> Date: Tue, 3 Oct 2023 09:33:19 +0200 Subject: [PATCH 245/264] Change Python Version to 3.8 in GitHub Actions --- .github/workflows/unit_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index e9147b83..c56347dd 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -22,10 +22,10 @@ jobs: steps: - name: Checkout Code uses: actions/checkout@v3 - 
- name: Set up Python 3.9 + - name: Set up Python 3.8 uses: actions/setup-python@v4 with: - python-version: 3.9 + python-version: 3.8 cache: pip cache-dependency-path: setup.py - name: Install dependencies -- GitLab From 3a7f52d033e72eb9bd34ce6aa8ff48090893d9e2 Mon Sep 17 00:00:00 2001 From: Hailey Schoelkopf <65563625+haileyschoelkopf@users.noreply.github.com> Date: Tue, 3 Oct 2023 14:31:24 -0400 Subject: [PATCH 246/264] Bring task name to `pubmedqa` in line with master branch --- lm_eval/tasks/pubmedqa/pubmedqa.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/pubmedqa/pubmedqa.yaml b/lm_eval/tasks/pubmedqa/pubmedqa.yaml index a182521e..6903aba4 100644 --- a/lm_eval/tasks/pubmedqa/pubmedqa.yaml +++ b/lm_eval/tasks/pubmedqa/pubmedqa.yaml @@ -1,4 +1,4 @@ -task: pubmed_qa +task: pubmedqa dataset_path: bigbio/pubmed_qa dataset_name: pubmed_qa_labeled_fold0_source output_type: multiple_choice -- GitLab From 1a77b4d54e84a7b9a49db2bbc7325ede487c9d00 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 3 Oct 2023 20:12:53 +0000 Subject: [PATCH 247/264] update generate_tasks.py and template yamls --- lm_eval/tasks/bigbench/generate_tasks.py | 2 +- lm_eval/tasks/bigbench/greedy_until_template_yaml | 5 ++--- lm_eval/tasks/bigbench/multiple_choice_template_yaml | 9 ++++++--- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lm_eval/tasks/bigbench/generate_tasks.py b/lm_eval/tasks/bigbench/generate_tasks.py index fa68190e..dbd7a959 100644 --- a/lm_eval/tasks/bigbench/generate_tasks.py +++ b/lm_eval/tasks/bigbench/generate_tasks.py @@ -185,7 +185,7 @@ def main() -> None: { "include": f"../{task_type}", "task": "bigbench_" + task + "_{}".format(task_type.split("_template_yaml")[0]), - "dataset_name": task, + "dataset_name": task + "_zero_shot", # zero-shot version of the dataset }, f, width=float("inf"), allow_unicode=True diff --git a/lm_eval/tasks/bigbench/greedy_until_template_yaml b/lm_eval/tasks/bigbench/greedy_until_template_yaml index 1d4e492b..db975306 100644 --- a/lm_eval/tasks/bigbench/greedy_until_template_yaml +++ b/lm_eval/tasks/bigbench/greedy_until_template_yaml @@ -1,8 +1,7 @@ group: bigbench -dataset_path: bigbench +dataset_path: hails/bigbench output_type: greedy_until -training_split: train -validation_split: validation +test_split: default doc_to_text: inputs doc_to_target: "{{targets[0]}}" generation_kwargs: diff --git a/lm_eval/tasks/bigbench/multiple_choice_template_yaml b/lm_eval/tasks/bigbench/multiple_choice_template_yaml index 3dd2af61..6211f5b3 100644 --- a/lm_eval/tasks/bigbench/multiple_choice_template_yaml +++ b/lm_eval/tasks/bigbench/multiple_choice_template_yaml @@ -1,10 +1,13 @@ group: bigbench -dataset_path: bigbench +dataset_path: hails/bigbench +dataset_kwargs: + num_shots: 0 # TODO: num of shots should be controlled through this, not through the typical methods + # subtask_name: null output_type: multiple_choice -training_split: train -validation_split: validation +test_split: default doc_to_text: inputs doc_to_target: "{{multiple_choice_targets.index(targets[0])}}" doc_to_choice: "{{multiple_choice_targets}}" metric_list: - metric: acc + # TODO: brier score and other metrics -- GitLab From 8806944e8790170b1b5429af83ceeac63c961660 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 3 Oct 2023 20:13:21 +0000 Subject: [PATCH 248/264] push updated bigbench task yamls --- .../bigbench/greedy_until/abstract_narrative_understanding.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml | 2 +- 
lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml | 2 +- .../tasks/bigbench/greedy_until/authorship_verification.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml | 2 +- .../greedy_until/bridging_anaphora_resolution_barqa.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml | 2 +- .../tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/codenames.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/color.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml | 2 +- .../tasks/bigbench/greedy_until/conceptual_combinations.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml | 2 +- .../greedy_until/contextual_parametric_knowledge_conflicts.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml | 2 +- .../bigbench/greedy_until/discourse_marker_prediction.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml | 2 +- .../tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml | 2 +- .../tasks/bigbench/greedy_until/english_russian_proverbs.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml | 2 +- .../tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml | 2 +- .../greedy_until/evaluating_information_essentiality.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml | 2 +- .../tasks/bigbench/greedy_until/figure_of_speech_detection.yaml | 2 +- .../greedy_until/formal_fallacies_syllogisms_negation.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/gem.yaml | 2 +- .../greedy_until/gender_inclusive_sentences_german.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml | 2 +- .../tasks/bigbench/greedy_until/gre_reading_comprehension.yaml | 2 +- 
lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml | 2 +- .../tasks/bigbench/greedy_until/hindi_question_answering.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/implicatures.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml | 2 +- .../greedy_until/international_phonetic_alphabet_nli.yaml | 2 +- .../international_phonetic_alphabet_transliterate.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/kannada.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/language_games.yaml | 2 +- .../tasks/bigbench/greedy_until/language_identification.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/list_functions.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/logical_args.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml | 2 +- .../tasks/bigbench/greedy_until/logical_fallacy_detection.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml | 2 +- .../bigbench/greedy_until/movie_dialog_same_or_different.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/multiemo.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/navigate.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/object_counting.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/operators.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml | 2 +- .../bigbench/greedy_until/parsinlu_reading_comprehension.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml | 2 +- 
lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/physics.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml | 2 +- .../bigbench/greedy_until/play_dialog_same_or_different.yaml | 2 +- .../tasks/bigbench/greedy_until/polish_sequence_labeling.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/question_selection.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml | 2 +- .../bigbench/greedy_until/reasoning_about_colored_objects.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/rephrase.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml | 2 +- .../greedy_until/salient_translation_error_detection.yaml | 2 +- .../tasks/bigbench/greedy_until/scientific_press_release.yaml | 2 +- .../greedy_until/semantic_parsing_in_context_sparc.yaml | 2 +- .../tasks/bigbench/greedy_until/semantic_parsing_spider.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml | 2 +- .../tasks/bigbench/greedy_until/similarities_abstraction.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml | 2 +- .../greedy_until/simple_arithmetic_json_multiple_choice.yaml | 2 +- .../bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml | 2 +- .../greedy_until/simple_arithmetic_multiple_targets_json.yaml | 2 +- .../tasks/bigbench/greedy_until/simple_ethical_questions.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/snarks.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/social_support.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml | 2 +- .../tasks/bigbench/greedy_until/swahili_english_proverbs.yaml | 2 +- .../tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/tense.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/timedial.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml | 2 +- .../tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml | 2 +- .../bigbench/greedy_until/unnatural_in_context_learning.yaml | 2 +- .../tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml | 2 +- 
lm_eval/tasks/bigbench/greedy_until/winowhy.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml | 2 +- lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml | 2 +- .../multiple_choice/abstract_narrative_understanding.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml | 2 +- .../tasks/bigbench/multiple_choice/analogical_similarity.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml | 2 +- .../tasks/bigbench/multiple_choice/ascii_word_recognition.yaml | 2 +- .../tasks/bigbench/multiple_choice/authorship_verification.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml | 2 +- .../multiple_choice/bridging_anaphora_resolution_barqa.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml | 2 +- .../tasks/bigbench/multiple_choice/chess_state_tracking.yaml | 2 +- .../bigbench/multiple_choice/chinese_remainder_theorem.yaml | 2 +- .../tasks/bigbench/multiple_choice/cifar10_classification.yaml | 2 +- .../tasks/bigbench/multiple_choice/code_line_description.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/codenames.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/color.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml | 2 +- .../tasks/bigbench/multiple_choice/conceptual_combinations.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml | 2 +- .../contextual_parametric_knowledge_conflicts.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml | 2 +- .../tasks/bigbench/multiple_choice/dark_humor_detection.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml | 2 +- .../bigbench/multiple_choice/discourse_marker_prediction.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml | 2 +- .../bigbench/multiple_choice/emojis_emotion_prediction.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml | 2 +- .../bigbench/multiple_choice/english_russian_proverbs.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml | 2 +- .../tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml | 2 +- .../multiple_choice/evaluating_information_essentiality.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml | 2 +- .../bigbench/multiple_choice/figure_of_speech_detection.yaml | 2 +- .../multiple_choice/formal_fallacies_syllogisms_negation.yaml | 2 +- 
lm_eval/tasks/bigbench/multiple_choice/gem.yaml | 2 +- .../multiple_choice/gender_inclusive_sentences_german.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml | 2 +- .../bigbench/multiple_choice/gre_reading_comprehension.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml | 2 +- .../bigbench/multiple_choice/hindi_question_answering.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml | 2 +- .../tasks/bigbench/multiple_choice/identify_math_theorems.yaml | 2 +- .../tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml | 2 +- .../multiple_choice/international_phonetic_alphabet_nli.yaml | 2 +- .../international_phonetic_alphabet_transliterate.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml | 2 +- .../tasks/bigbench/multiple_choice/irony_identification.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/kannada.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/language_games.yaml | 2 +- .../tasks/bigbench/multiple_choice/language_identification.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml | 2 +- .../bigbench/multiple_choice/logical_fallacy_detection.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml | 2 +- .../tasks/bigbench/multiple_choice/mathematical_induction.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml | 2 +- .../tasks/bigbench/multiple_choice/metaphor_understanding.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml | 2 +- .../tasks/bigbench/multiple_choice/misconceptions_russian.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml | 2 +- .../tasks/bigbench/multiple_choice/moral_permissibility.yaml | 2 +- .../multiple_choice/movie_dialog_same_or_different.yaml | 2 +- .../tasks/bigbench/multiple_choice/movie_recommendation.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml | 2 +- .../tasks/bigbench/multiple_choice/natural_instructions.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/navigate.yaml | 2 +- .../tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml | 2 +- lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml | 2 +- 
lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/operators.yaml | 2 +-
.../tasks/bigbench/multiple_choice/paragraph_segmentation.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml | 2 +-
.../multiple_choice/parsinlu_reading_comprehension.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/physics.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml | 2 +-
.../bigbench/multiple_choice/play_dialog_same_or_different.yaml | 2 +-
.../bigbench/multiple_choice/polish_sequence_labeling.yaml | 2 +-
.../tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml | 2 +-
.../multiple_choice/reasoning_about_colored_objects.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml | 2 +-
.../multiple_choice/salient_translation_error_detection.yaml | 2 +-
.../bigbench/multiple_choice/scientific_press_release.yaml | 2 +-
.../multiple_choice/semantic_parsing_in_context_sparc.yaml | 2 +-
.../tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml | 2 +-
.../bigbench/multiple_choice/similarities_abstraction.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml | 2 +-
.../tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml | 2 +-
.../multiple_choice/simple_arithmetic_json_multiple_choice.yaml | 2 +-
.../multiple_choice/simple_arithmetic_json_subtasks.yaml | 2 +-
.../simple_arithmetic_multiple_targets_json.yaml | 2 +-
.../bigbench/multiple_choice/simple_ethical_questions.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/snarks.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/social_support.yaml | 2 +-
.../tasks/bigbench/multiple_choice/sports_understanding.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml | 2 +-
.../tasks/bigbench/multiple_choice/sufficient_information.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml | 2 +-
.../bigbench/multiple_choice/swahili_english_proverbs.yaml | 2 +-
.../bigbench/multiple_choice/swedish_to_german_proverbs.yaml | 2 +-
.../tasks/bigbench/multiple_choice/symbol_interpretation.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/tense.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/timedial.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml | 2 +-
.../bigbench/multiple_choice/tracking_shuffled_objects.yaml | 2 +-
.../tasks/bigbench/multiple_choice/understanding_fables.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml | 2 +-
.../bigbench/multiple_choice/unnatural_in_context_learning.yaml | 2 +-
.../bigbench/multiple_choice/vitaminc_fact_verification.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml | 2 +-
lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml | 2 +-
334 files changed, 334 insertions(+), 334 deletions(-)

diff --git a/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml
index 462d1be9..dd041fdd 100644
--- a/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/abstract_narrative_understanding.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: abstract_narrative_understanding
+dataset_name: abstract_narrative_understanding_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_abstract_narrative_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml b/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml
index d62133a0..9e723927 100644
--- a/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/anachronisms.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: anachronisms
+dataset_name: anachronisms_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_anachronisms_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml b/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml
index 2fedcd91..3d2e82b4 100644
--- a/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/analogical_similarity.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: analogical_similarity
+dataset_name: analogical_similarity_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_analogical_similarity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml b/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml
index 58de1bcf..a8425049 100644
--- a/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/analytic_entailment.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: analytic_entailment
+dataset_name: analytic_entailment_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_analytic_entailment_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml b/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml
index 6be6a787..be296b1b 100644
--- a/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/arithmetic.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: arithmetic
+dataset_name: arithmetic_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_arithmetic_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml b/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml
index 0461605a..d199e8a5 100644
--- a/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/ascii_word_recognition.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: ascii_word_recognition
+dataset_name: ascii_word_recognition_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_ascii_word_recognition_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml b/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml
index dbfa2103..65d8177c 100644
--- a/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/authorship_verification.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: authorship_verification
+dataset_name: authorship_verification_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_authorship_verification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml b/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml
index 9ab1545e..3ce36427 100644
--- a/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/auto_categorization.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: auto_categorization
+dataset_name: auto_categorization_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_auto_categorization_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml b/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml
index e8a491c0..e25bee24 100644
--- a/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/auto_debugging.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: auto_debugging
+dataset_name: auto_debugging_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_auto_debugging_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml b/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml
index 8b97ba0a..d1d45477 100644
--- a/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/bbq_lite_json.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: bbq_lite_json
+dataset_name: bbq_lite_json_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_bbq_lite_json_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml b/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml
index 618d8dd3..a20da27f 100644
--- a/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/bridging_anaphora_resolution_barqa.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: bridging_anaphora_resolution_barqa
+dataset_name: bridging_anaphora_resolution_barqa_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_bridging_anaphora_resolution_barqa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml b/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml
index 687d59ba..2b9c89af 100644
--- a/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/causal_judgment.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: causal_judgment
+dataset_name: causal_judgment_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_causal_judgment_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml b/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml
index a1f20264..5dd23108 100644
--- a/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/cause_and_effect.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: cause_and_effect
+dataset_name: cause_and_effect_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_cause_and_effect_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml b/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml
index 4089a228..06681769 100644
--- a/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/checkmate_in_one.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: checkmate_in_one
+dataset_name: checkmate_in_one_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_checkmate_in_one_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml b/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml
index 727e7879..6a9a088e 100644
--- a/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/chess_state_tracking.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: chess_state_tracking
+dataset_name: chess_state_tracking_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_chess_state_tracking_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml b/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml
index 6af0bcbf..f3937088 100644
--- a/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/chinese_remainder_theorem.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: chinese_remainder_theorem
+dataset_name: chinese_remainder_theorem_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_chinese_remainder_theorem_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml b/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml
index 3e0bf92c..6bad6797 100644
--- a/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/cifar10_classification.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: cifar10_classification
+dataset_name: cifar10_classification_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_cifar10_classification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml b/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml
index 624ab362..de1f7829 100644
--- a/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/code_line_description.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: code_line_description
+dataset_name: code_line_description_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_code_line_description_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/codenames.yaml b/lm_eval/tasks/bigbench/greedy_until/codenames.yaml
index 6ea8f12e..83feca88 100644
--- a/lm_eval/tasks/bigbench/greedy_until/codenames.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/codenames.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: codenames
+dataset_name: codenames_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_codenames_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/color.yaml b/lm_eval/tasks/bigbench/greedy_until/color.yaml
index 4ae393fd..5aa9c1a9 100644
--- a/lm_eval/tasks/bigbench/greedy_until/color.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/color.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: color
+dataset_name: color_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_color_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml b/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml
index 90d183ad..ec0fdc44 100644
--- a/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/common_morpheme.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: common_morpheme
+dataset_name: common_morpheme_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_common_morpheme_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml b/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml
index 007649a4..5eaba446 100644
--- a/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/conceptual_combinations.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: conceptual_combinations
+dataset_name: conceptual_combinations_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_conceptual_combinations_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml b/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml
index 3b5bafac..afae8184 100644
--- a/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/conlang_translation.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: conlang_translation
+dataset_name: conlang_translation_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_conlang_translation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml b/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml
index dc594b9b..bb7eba64 100644
--- a/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/contextual_parametric_knowledge_conflicts.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: contextual_parametric_knowledge_conflicts
+dataset_name: contextual_parametric_knowledge_conflicts_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_contextual_parametric_knowledge_conflicts_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml b/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml
index aca19b7b..ae7f6b9f 100644
--- a/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/crash_blossom.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: crash_blossom
+dataset_name: crash_blossom_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_crash_blossom_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml b/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml
index 043e8f47..7d56bbc2 100644
--- a/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/crass_ai.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: crass_ai
+dataset_name: crass_ai_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_crass_ai_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml b/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml
index eb9c5b3b..37fd99ad 100644
--- a/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/cryobiology_spanish.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: cryobiology_spanish
+dataset_name: cryobiology_spanish_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_cryobiology_spanish_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml b/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml
index 15c181b2..64577738 100644
--- a/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/cryptonite.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: cryptonite
+dataset_name: cryptonite_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_cryptonite_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml b/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml
index 477c2497..9279c295 100644
--- a/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/cs_algorithms.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: cs_algorithms
+dataset_name: cs_algorithms_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_cs_algorithms_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml
index 0521848d..014d57e6 100644
--- a/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/dark_humor_detection.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: dark_humor_detection
+dataset_name: dark_humor_detection_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_dark_humor_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml
index 5936e98f..999a7e71 100644
--- a/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/date_understanding.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: date_understanding
+dataset_name: date_understanding_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_date_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml
index ffe5135a..db25589d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/disambiguation_qa.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: disambiguation_qa
+dataset_name: disambiguation_qa_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_disambiguation_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml b/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml
index aed41150..ae8941e8 100644
--- a/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/discourse_marker_prediction.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: discourse_marker_prediction
+dataset_name: discourse_marker_prediction_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_discourse_marker_prediction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml
index fd087719..0086850a 100644
--- a/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/disfl_qa.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: disfl_qa
+dataset_name: disfl_qa_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_disfl_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml b/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml
index af29b7f8..e8de0093 100644
--- a/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/dyck_languages.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: dyck_languages
+dataset_name: dyck_languages_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_dyck_languages_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml
index ea1a61ba..55369151 100644
--- a/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/elementary_math_qa.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: elementary_math_qa
+dataset_name: elementary_math_qa_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_elementary_math_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml b/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml
index a8368f62..4553ede7 100644
--- a/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/emoji_movie.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: emoji_movie
+dataset_name: emoji_movie_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_emoji_movie_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml b/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml
index f8392d66..e570e24a 100644
--- a/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/emojis_emotion_prediction.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: emojis_emotion_prediction
+dataset_name: emojis_emotion_prediction_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_emojis_emotion_prediction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml b/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml
index 97ea08c8..d4f2f3cf 100644
--- a/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/empirical_judgments.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: empirical_judgments
+dataset_name: empirical_judgments_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_empirical_judgments_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml
index 2eaa4a9b..b7628796 100644
--- a/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/english_proverbs.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: english_proverbs
+dataset_name: english_proverbs_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_english_proverbs_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml
index d0386e50..ea719e1d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/english_russian_proverbs.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: english_russian_proverbs
+dataset_name: english_russian_proverbs_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_english_russian_proverbs_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml
index efb1f853..e3d89fc2 100644
--- a/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: entailed_polarity
+dataset_name: entailed_polarity_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_entailed_polarity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml
index 5922a065..e416a059 100644
--- a/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/entailed_polarity_hindi.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: entailed_polarity_hindi
+dataset_name: entailed_polarity_hindi_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_entailed_polarity_hindi_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml b/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml
index d6307592..8f8efc4e 100644
--- a/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/epistemic_reasoning.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: epistemic_reasoning
+dataset_name: epistemic_reasoning_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_epistemic_reasoning_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml b/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml
index 13b6dd32..b35240c4 100644
--- a/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/evaluating_information_essentiality.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: evaluating_information_essentiality
+dataset_name: evaluating_information_essentiality_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_evaluating_information_essentiality_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml b/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml
index 6d3ccf9f..f83e4081 100644
--- a/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/fact_checker.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: fact_checker
+dataset_name: fact_checker_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_fact_checker_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml b/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml
index 16415a7b..ab38359d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/fantasy_reasoning.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: fantasy_reasoning
+dataset_name: fantasy_reasoning_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_fantasy_reasoning_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml b/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml
index 229e1c70..bf1e33e0 100644
--- a/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/few_shot_nlg.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: few_shot_nlg
+dataset_name: few_shot_nlg_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_few_shot_nlg_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml
index 059f9f33..184cd4e6 100644
--- a/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/figure_of_speech_detection.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: figure_of_speech_detection
+dataset_name: figure_of_speech_detection_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_figure_of_speech_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml b/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml
index 663a718c..cb1915b8 100644
--- a/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/formal_fallacies_syllogisms_negation.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: formal_fallacies_syllogisms_negation
+dataset_name: formal_fallacies_syllogisms_negation_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_formal_fallacies_syllogisms_negation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/gem.yaml b/lm_eval/tasks/bigbench/greedy_until/gem.yaml
index 79492583..aa43ca45 100644
--- a/lm_eval/tasks/bigbench/greedy_until/gem.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/gem.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: gem
+dataset_name: gem_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_gem_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml b/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml
index 10414179..6471e577 100644
--- a/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/gender_inclusive_sentences_german.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: gender_inclusive_sentences_german
+dataset_name: gender_inclusive_sentences_german_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_gender_inclusive_sentences_german_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml b/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml
index b2a14656..93a3f875 100644
--- a/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/general_knowledge.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: general_knowledge
+dataset_name: general_knowledge_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_general_knowledge_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml b/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml
index 4e256462..c3a5d9a7 100644
--- a/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/geometric_shapes.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: geometric_shapes
+dataset_name: geometric_shapes_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_geometric_shapes_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml b/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml
index d865e3d4..6fd557d3 100644
--- a/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/goal_step_wikihow.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: goal_step_wikihow
+dataset_name: goal_step_wikihow_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_goal_step_wikihow_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml b/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml
index 9f044835..c4416b10 100644
--- a/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/gre_reading_comprehension.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: gre_reading_comprehension
+dataset_name: gre_reading_comprehension_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_gre_reading_comprehension_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml b/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml
index 1ab62b56..4060824c 100644
--- a/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/hhh_alignment.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: hhh_alignment
+dataset_name: hhh_alignment_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_hhh_alignment_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml b/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml
index 3a0fa8b2..5c4791b4 100644
--- a/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/hindi_question_answering.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: hindi_question_answering
+dataset_name: hindi_question_answering_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_hindi_question_answering_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml b/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml
index 19162629..040441f7 100644
--- a/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/hindu_knowledge.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: hindu_knowledge
+dataset_name: hindu_knowledge_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_hindu_knowledge_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml b/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml
index 84073aa0..0eb98e51 100644
--- a/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/hinglish_toxicity.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: hinglish_toxicity
+dataset_name: hinglish_toxicity_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_hinglish_toxicity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml b/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml
index 32fc0058..c5541571 100644
--- a/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/human_organs_senses.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: human_organs_senses
+dataset_name: human_organs_senses_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_human_organs_senses_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml b/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml
index d3a65a87..4368f4c9 100644
--- a/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/hyperbaton.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: hyperbaton
+dataset_name: hyperbaton_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_hyperbaton_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml b/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml
index 616085c8..2c08703e 100644
--- a/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/identify_math_theorems.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: identify_math_theorems
+dataset_name: identify_math_theorems_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_identify_math_theorems_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml b/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml
index 6500f7a9..9cb39d0d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/identify_odd_metaphor.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: identify_odd_metaphor
+dataset_name: identify_odd_metaphor_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_identify_odd_metaphor_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml b/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml
index fdc133f5..e216762c 100644
--- a/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/implicatures.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: implicatures
+dataset_name: implicatures_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_implicatures_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml b/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml
index b05af0ad..c7a82a10 100644
--- a/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/implicit_relations.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: implicit_relations
+dataset_name: implicit_relations_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_implicit_relations_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml b/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml
index 37769770..4839afa2 100644
--- a/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/intent_recognition.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: intent_recognition
+dataset_name: intent_recognition_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_intent_recognition_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml
index 81b975c9..62643a46 100644
--- a/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_nli.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: international_phonetic_alphabet_nli
+dataset_name: international_phonetic_alphabet_nli_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_international_phonetic_alphabet_nli_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml
index ac664332..05feb4f5 100644
--- a/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/international_phonetic_alphabet_transliterate.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: international_phonetic_alphabet_transliterate
+dataset_name: international_phonetic_alphabet_transliterate_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_international_phonetic_alphabet_transliterate_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml b/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml
index d08f1d6a..57745d23 100644
--- a/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/intersect_geometry.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: intersect_geometry
+dataset_name: intersect_geometry_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_intersect_geometry_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml b/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml
index d9d5961c..b49dfb44 100644
--- a/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/irony_identification.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: irony_identification
+dataset_name: irony_identification_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_irony_identification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml b/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml
index b6a7470f..293ff6c2 100644
--- a/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/kanji_ascii.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: kanji_ascii
+dataset_name: kanji_ascii_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_kanji_ascii_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/kannada.yaml b/lm_eval/tasks/bigbench/greedy_until/kannada.yaml
index 50ad13c1..00eeb32a 100644
--- a/lm_eval/tasks/bigbench/greedy_until/kannada.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/kannada.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: kannada
+dataset_name: kannada_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_kannada_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml b/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml
index 6d5ad040..d313e1ce 100644
--- a/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/key_value_maps.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: key_value_maps
+dataset_name: key_value_maps_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_key_value_maps_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml b/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml
index c07e0e8c..d72e1d37 100644
--- a/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/known_unknowns.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: known_unknowns
+dataset_name: known_unknowns_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_known_unknowns_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/language_games.yaml b/lm_eval/tasks/bigbench/greedy_until/language_games.yaml
index 392a7190..61e85b53 100644
--- a/lm_eval/tasks/bigbench/greedy_until/language_games.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/language_games.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: language_games
+dataset_name: language_games_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_language_games_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml b/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml
index 583d9108..8db65637 100644
--- a/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/language_identification.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: language_identification
+dataset_name: language_identification_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_language_identification_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml b/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml
index 92a855a8..db6e9832 100644
--- a/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/linguistic_mappings.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: linguistic_mappings
+dataset_name: linguistic_mappings_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_linguistic_mappings_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml b/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml
index 7aec6607..4e3981f4 100644
--- a/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/linguistics_puzzles.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: linguistics_puzzles
+dataset_name: linguistics_puzzles_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_linguistics_puzzles_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml b/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml
index f7f0d436..32afff69 100644
--- a/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/list_functions.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: list_functions
+dataset_name: list_functions_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_list_functions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml b/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml
index 2699b12f..a1d1b5b1 100644
--- a/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/logic_grid_puzzle.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: logic_grid_puzzle
+dataset_name: logic_grid_puzzle_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_logic_grid_puzzle_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml
index 9a263f96..201c04ae 100644
--- a/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_args.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: logical_args
+dataset_name: logical_args_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_logical_args_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml
index 5e72facb..1b77561d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_deduction.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: logical_deduction
+dataset_name: logical_deduction_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_logical_deduction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml
index a21fbc58..af3e9ea4 100644
--- a/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_fallacy_detection.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: logical_fallacy_detection
+dataset_name: logical_fallacy_detection_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_logical_fallacy_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml b/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml
index f01ce277..4d4ffe1d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/logical_sequence.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: logical_sequence
+dataset_name: logical_sequence_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_logical_sequence_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml b/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml
index d4b2fcf6..84d0f419 100644
--- a/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/mathematical_induction.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: mathematical_induction
+dataset_name: mathematical_induction_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_mathematical_induction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml b/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml
index adf86ecc..956aa5f0 100644
--- a/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/matrixshapes.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: matrixshapes
+dataset_name: matrixshapes_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_matrixshapes_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml b/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml
index 94893b4d..7fd4e53c 100644
--- a/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/metaphor_boolean.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: metaphor_boolean
+dataset_name: metaphor_boolean_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_metaphor_boolean_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml
index 8ca4da75..12b79d44 100644
--- a/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/metaphor_understanding.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: metaphor_understanding
+dataset_name: metaphor_understanding_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_metaphor_understanding_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml
index b9db2b8f..459aec57 100644
--- a/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/minute_mysteries_qa.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: minute_mysteries_qa
+dataset_name: minute_mysteries_qa_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_minute_mysteries_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml b/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml
index 60c8221b..25038ae3 100644
--- a/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/misconceptions.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: misconceptions
+dataset_name: misconceptions_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_misconceptions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml b/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml
index a1fca685..676d94ea 100644
--- a/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/misconceptions_russian.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: misconceptions_russian
+dataset_name: misconceptions_russian_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_misconceptions_russian_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml b/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml
index b845caa3..19c9a82b 100644
--- a/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/mnist_ascii.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: mnist_ascii
+dataset_name: mnist_ascii_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_mnist_ascii_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml b/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml
index 5dc888f6..313b5b9d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/modified_arithmetic.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: modified_arithmetic
+dataset_name: modified_arithmetic_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_modified_arithmetic_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml b/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml
index a20c23be..f478ed24 100644
--- a/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/moral_permissibility.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: moral_permissibility
+dataset_name: moral_permissibility_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_moral_permissibility_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml
index db57a939..98e06e5d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/movie_dialog_same_or_different.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: movie_dialog_same_or_different
+dataset_name: movie_dialog_same_or_different_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_movie_dialog_same_or_different_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml b/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml
index 00a0c1a4..7cd021a4 100644
--- a/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/movie_recommendation.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: movie_recommendation
+dataset_name: movie_recommendation_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_movie_recommendation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml b/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml
index 7a1003cf..92b84838 100644
--- a/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/mult_data_wrangling.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: mult_data_wrangling
+dataset_name: mult_data_wrangling_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_mult_data_wrangling_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml b/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml
index df230d77..ac4f9432 100644
--- a/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/multiemo.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: multiemo
+dataset_name: multiemo_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_multiemo_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml b/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml
index cc800106..0b87004d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/natural_instructions.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: natural_instructions
+dataset_name: natural_instructions_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_natural_instructions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/navigate.yaml b/lm_eval/tasks/bigbench/greedy_until/navigate.yaml
index 1e3004d5..85fd618b 100644
--- a/lm_eval/tasks/bigbench/greedy_until/navigate.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/navigate.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: navigate
+dataset_name: navigate_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_navigate_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml b/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml
index 169b6743..863b0a85 100644
--- a/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/nonsense_words_grammar.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: nonsense_words_grammar
+dataset_name: nonsense_words_grammar_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_nonsense_words_grammar_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml b/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml
index 9618dce2..b3b08806 100644
--- a/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/novel_concepts.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: novel_concepts
+dataset_name: novel_concepts_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_novel_concepts_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml b/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml
index 7b058748..fc0d6119 100644
--- a/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/object_counting.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: object_counting
+dataset_name: object_counting_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_object_counting_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml b/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml
index 1742789e..90d0fd93 100644
--- a/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/odd_one_out.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: odd_one_out
+dataset_name: odd_one_out_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_odd_one_out_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/operators.yaml b/lm_eval/tasks/bigbench/greedy_until/operators.yaml
index d71d87c2..d4ad9b91 100644
--- a/lm_eval/tasks/bigbench/greedy_until/operators.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/operators.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: operators
+dataset_name: operators_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_operators_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml b/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml
index 13d8fb9d..c661e1a7 100644
--- a/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/paragraph_segmentation.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: paragraph_segmentation
+dataset_name: paragraph_segmentation_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_paragraph_segmentation_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml b/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml
index f8b78f8d..4ea51e21 100644
--- a/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/parsinlu_qa.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: parsinlu_qa
+dataset_name: parsinlu_qa_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_parsinlu_qa_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml b/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml
index 4db292d0..967741fd 100644
--- a/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/parsinlu_reading_comprehension.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: parsinlu_reading_comprehension
+dataset_name: parsinlu_reading_comprehension_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_parsinlu_reading_comprehension_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml b/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml
index a282fa64..5e59b741 100644
--- a/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/penguins_in_a_table.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: penguins_in_a_table
+dataset_name: penguins_in_a_table_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_penguins_in_a_table_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml b/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml
index 458a2e3d..a7ed5a82 100644
--- a/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/periodic_elements.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: periodic_elements
+dataset_name: periodic_elements_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_periodic_elements_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml b/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml
index e51eb69a..087d4688 100644
--- a/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/persian_idioms.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: persian_idioms
+dataset_name: persian_idioms_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_persian_idioms_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml b/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml
index 3b03a67f..c2da5cce 100644
--- a/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/phrase_relatedness.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: phrase_relatedness
+dataset_name: phrase_relatedness_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_phrase_relatedness_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml b/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml
index 358b7db3..1482fe65 100644
--- a/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/physical_intuition.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: physical_intuition
+dataset_name: physical_intuition_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_physical_intuition_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/physics.yaml b/lm_eval/tasks/bigbench/greedy_until/physics.yaml
index d9d6f936..7fade7b3 100644
--- a/lm_eval/tasks/bigbench/greedy_until/physics.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/physics.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: physics
+dataset_name: physics_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_physics_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml b/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml
index 6af11448..bf332361 100644
--- a/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/physics_questions.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: physics_questions
+dataset_name: physics_questions_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_physics_questions_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml
index 600143e9..1ddf7ca7 100644
--- a/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/play_dialog_same_or_different.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: play_dialog_same_or_different
+dataset_name: play_dialog_same_or_different_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_play_dialog_same_or_different_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml b/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml
index 432820ad..10c8bd98 100644
--- a/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/polish_sequence_labeling.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: polish_sequence_labeling
+dataset_name: polish_sequence_labeling_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_polish_sequence_labeling_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml b/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml
index c492b17f..66d0e5ea 100644
--- a/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/presuppositions_as_nli.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: presuppositions_as_nli
+dataset_name: presuppositions_as_nli_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_presuppositions_as_nli_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml b/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml
index a23ea6e7..67240110 100644
--- a/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/qa_wikidata.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: qa_wikidata
+dataset_name: qa_wikidata_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_qa_wikidata_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml b/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml
index 47953c14..5652cb3f 100644
--- a/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/question_selection.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: question_selection
+dataset_name: question_selection_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_question_selection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml b/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml
index e15af76e..c206597b 100644
--- a/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/real_or_fake_text.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: real_or_fake_text
+dataset_name: real_or_fake_text_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_real_or_fake_text_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml
index b1aa5ec0..8b1051e5 100644
--- a/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/reasoning_about_colored_objects.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: reasoning_about_colored_objects
+dataset_name: reasoning_about_colored_objects_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_reasoning_about_colored_objects_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml b/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml
index 12831cc7..279ecd01 100644
--- a/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/repeat_copy_logic.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: repeat_copy_logic
+dataset_name: repeat_copy_logic_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_repeat_copy_logic_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml b/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml
index 78c6bda7..90135638 100644
--- a/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/rephrase.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: rephrase
+dataset_name: rephrase_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_rephrase_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml b/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml
index e93b4aa9..a11c167d 100644
--- a/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/riddle_sense.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: riddle_sense
+dataset_name: riddle_sense_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_riddle_sense_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml b/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml
index 46039e1f..5074e010 100644
--- a/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/ruin_names.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: ruin_names
+dataset_name: ruin_names_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_ruin_names_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml b/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml
index a7e5c77e..7f2ce433 100644
--- a/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/salient_translation_error_detection.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: salient_translation_error_detection
+dataset_name: salient_translation_error_detection_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_salient_translation_error_detection_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml b/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml
index aa35e659..90071882 100644
--- a/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/scientific_press_release.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: scientific_press_release
+dataset_name: scientific_press_release_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_scientific_press_release_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml
index 184bfcb9..93ddccc2 100644
--- a/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_in_context_sparc.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: semantic_parsing_in_context_sparc
+dataset_name: semantic_parsing_in_context_sparc_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_semantic_parsing_in_context_sparc_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml
index ae0b9461..cc590faf 100644
--- a/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/semantic_parsing_spider.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: semantic_parsing_spider
+dataset_name: semantic_parsing_spider_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_semantic_parsing_spider_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml b/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml
index bb72ec88..6cbacb79 100644
--- a/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/sentence_ambiguity.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: sentence_ambiguity
+dataset_name: sentence_ambiguity_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_sentence_ambiguity_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml b/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml
index 5c1ef27f..10e9a439 100644
--- a/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/similarities_abstraction.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: similarities_abstraction
+dataset_name: similarities_abstraction_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_similarities_abstraction_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml b/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml
index 742df0fb..a82b8226 100644
--- a/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/simp_turing_concept.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: simp_turing_concept
+dataset_name: simp_turing_concept_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_simp_turing_concept_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml
index 4e70a160..8e0a207e 100644
--- a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: simple_arithmetic_json
+dataset_name: simple_arithmetic_json_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_simple_arithmetic_json_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml
index 5f6b6732..df235325 100644
--- a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_multiple_choice.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: simple_arithmetic_json_multiple_choice
+dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_simple_arithmetic_json_multiple_choice_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml
index 32c5fcfd..2f981fb0 100644
--- a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml
+++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_json_subtasks.yaml
@@ -1,4 +1,4 @@
 # Generated by utils.py
-dataset_name: simple_arithmetic_json_subtasks
+dataset_name: simple_arithmetic_json_subtasks_zero_shot
 include: ../greedy_until_template_yaml
 task: bigbench_simple_arithmetic_json_subtasks_greedy_until
diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml
index 0d87803e..2bc6cf16 100644
---
a/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/simple_arithmetic_multiple_targets_json.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_arithmetic_multiple_targets_json +dataset_name: simple_arithmetic_multiple_targets_json_zero_shot include: ../greedy_until_template_yaml task: bigbench_simple_arithmetic_multiple_targets_json_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml index 2332985c..77e45a58 100644 --- a/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/simple_ethical_questions.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_ethical_questions +dataset_name: simple_ethical_questions_zero_shot include: ../greedy_until_template_yaml task: bigbench_simple_ethical_questions_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml b/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml index 1d9943e4..1b485d5c 100644 --- a/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/simple_text_editing.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_text_editing +dataset_name: simple_text_editing_zero_shot include: ../greedy_until_template_yaml task: bigbench_simple_text_editing_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/snarks.yaml b/lm_eval/tasks/bigbench/greedy_until/snarks.yaml index e98308e1..9ccbda74 100644 --- a/lm_eval/tasks/bigbench/greedy_until/snarks.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/snarks.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: snarks +dataset_name: snarks_zero_shot include: ../greedy_until_template_yaml task: bigbench_snarks_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml b/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml index 7af09b30..9cbc5ec5 100644 --- a/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/social_iqa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: social_iqa +dataset_name: social_iqa_zero_shot include: ../greedy_until_template_yaml task: bigbench_social_iqa_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/social_support.yaml b/lm_eval/tasks/bigbench/greedy_until/social_support.yaml index 8e34e758..bcc3a9d1 100644 --- a/lm_eval/tasks/bigbench/greedy_until/social_support.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/social_support.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: social_support +dataset_name: social_support_zero_shot include: ../greedy_until_template_yaml task: bigbench_social_support_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml b/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml index 3ae80c24..01082a10 100644 --- a/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/sports_understanding.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: sports_understanding +dataset_name: sports_understanding_zero_shot include: ../greedy_until_template_yaml task: bigbench_sports_understanding_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml b/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml index b6020b08..a0bf1c46 100644 --- 
a/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/strange_stories.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: strange_stories +dataset_name: strange_stories_zero_shot include: ../greedy_until_template_yaml task: bigbench_strange_stories_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml b/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml index 066c89d1..495d873f 100644 --- a/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/strategyqa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: strategyqa +dataset_name: strategyqa_zero_shot include: ../greedy_until_template_yaml task: bigbench_strategyqa_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml b/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml index 27ef04dd..3484952c 100644 --- a/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/sufficient_information.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: sufficient_information +dataset_name: sufficient_information_zero_shot include: ../greedy_until_template_yaml task: bigbench_sufficient_information_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml b/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml index a7496025..a8e980d5 100644 --- a/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/suicide_risk.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: suicide_risk +dataset_name: suicide_risk_zero_shot include: ../greedy_until_template_yaml task: bigbench_suicide_risk_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml index 25e7dfe6..ff045534 100644 --- a/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/swahili_english_proverbs.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: swahili_english_proverbs +dataset_name: swahili_english_proverbs_zero_shot include: ../greedy_until_template_yaml task: bigbench_swahili_english_proverbs_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml b/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml index 1beebb17..8cbd401b 100644 --- a/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/swedish_to_german_proverbs.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: swedish_to_german_proverbs +dataset_name: swedish_to_german_proverbs_zero_shot include: ../greedy_until_template_yaml task: bigbench_swedish_to_german_proverbs_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml b/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml index 27b29a05..3fa4cdba 100644 --- a/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/symbol_interpretation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: symbol_interpretation +dataset_name: symbol_interpretation_zero_shot include: ../greedy_until_template_yaml task: bigbench_symbol_interpretation_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml b/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml index 6ed42414..c20300f8 100644 --- 
a/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/temporal_sequences.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: temporal_sequences +dataset_name: temporal_sequences_zero_shot include: ../greedy_until_template_yaml task: bigbench_temporal_sequences_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/tense.yaml b/lm_eval/tasks/bigbench/greedy_until/tense.yaml index 49adc7c2..b1b5698d 100644 --- a/lm_eval/tasks/bigbench/greedy_until/tense.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/tense.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: tense +dataset_name: tense_zero_shot include: ../greedy_until_template_yaml task: bigbench_tense_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/timedial.yaml b/lm_eval/tasks/bigbench/greedy_until/timedial.yaml index 391dff43..d5f1950e 100644 --- a/lm_eval/tasks/bigbench/greedy_until/timedial.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/timedial.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: timedial +dataset_name: timedial_zero_shot include: ../greedy_until_template_yaml task: bigbench_timedial_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml b/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml index f9f1893f..4ec83039 100644 --- a/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/topical_chat.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: topical_chat +dataset_name: topical_chat_zero_shot include: ../greedy_until_template_yaml task: bigbench_topical_chat_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml b/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml index 675b0e37..27024bee 100644 --- a/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/tracking_shuffled_objects.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: tracking_shuffled_objects +dataset_name: tracking_shuffled_objects_zero_shot include: ../greedy_until_template_yaml task: bigbench_tracking_shuffled_objects_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml b/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml index 3c5ff40a..f467652d 100644 --- a/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/understanding_fables.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: understanding_fables +dataset_name: understanding_fables_zero_shot include: ../greedy_until_template_yaml task: bigbench_understanding_fables_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml b/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml index 8e0c0699..d91ff331 100644 --- a/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/undo_permutation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: undo_permutation +dataset_name: undo_permutation_zero_shot include: ../greedy_until_template_yaml task: bigbench_undo_permutation_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml b/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml index 384ccc05..a31929fb 100644 --- a/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/unit_conversion.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: unit_conversion 
+dataset_name: unit_conversion_zero_shot include: ../greedy_until_template_yaml task: bigbench_unit_conversion_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml b/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml index a33bfd51..ca4c38be 100644 --- a/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/unit_interpretation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: unit_interpretation +dataset_name: unit_interpretation_zero_shot include: ../greedy_until_template_yaml task: bigbench_unit_interpretation_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml b/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml index cb3d2572..1cc271d2 100644 --- a/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/unnatural_in_context_learning.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: unnatural_in_context_learning +dataset_name: unnatural_in_context_learning_zero_shot include: ../greedy_until_template_yaml task: bigbench_unnatural_in_context_learning_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml b/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml index 67380ab9..770e8500 100644 --- a/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/vitaminc_fact_verification.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: vitaminc_fact_verification +dataset_name: vitaminc_fact_verification_zero_shot include: ../greedy_until_template_yaml task: bigbench_vitaminc_fact_verification_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml b/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml index baad0d9e..8c60da65 100644 --- a/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/what_is_the_tao.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: what_is_the_tao +dataset_name: what_is_the_tao_zero_shot include: ../greedy_until_template_yaml task: bigbench_what_is_the_tao_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml b/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml index 70047ee7..4eda6d08 100644 --- a/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/which_wiki_edit.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: which_wiki_edit +dataset_name: which_wiki_edit_zero_shot include: ../greedy_until_template_yaml task: bigbench_which_wiki_edit_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml b/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml index fff312b3..e065c80c 100644 --- a/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/winowhy.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: winowhy +dataset_name: winowhy_zero_shot include: ../greedy_until_template_yaml task: bigbench_winowhy_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml b/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml index 77b55d77..caa6f02d 100644 --- a/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/word_sorting.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: word_sorting +dataset_name: word_sorting_zero_shot include: 
../greedy_until_template_yaml task: bigbench_word_sorting_greedy_until diff --git a/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml b/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml index 75fe7de9..774aef15 100644 --- a/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml +++ b/lm_eval/tasks/bigbench/greedy_until/word_unscrambling.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: word_unscrambling +dataset_name: word_unscrambling_zero_shot include: ../greedy_until_template_yaml task: bigbench_word_unscrambling_greedy_until diff --git a/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml index e815ad82..34cefc25 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: abstract_narrative_understanding +dataset_name: abstract_narrative_understanding_zero_shot include: ../multiple_choice_template_yaml task: bigbench_abstract_narrative_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml b/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml index 0edb33ae..b1e2903c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: anachronisms +dataset_name: anachronisms_zero_shot include: ../multiple_choice_template_yaml task: bigbench_anachronisms_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml b/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml index 4a63e23a..6e20092e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: analogical_similarity +dataset_name: analogical_similarity_zero_shot include: ../multiple_choice_template_yaml task: bigbench_analogical_similarity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml b/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml index 3503337d..9ecf8fb5 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: analytic_entailment +dataset_name: analytic_entailment_zero_shot include: ../multiple_choice_template_yaml task: bigbench_analytic_entailment_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml b/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml index a7af2d17..9b19b92f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: arithmetic +dataset_name: arithmetic_zero_shot include: ../multiple_choice_template_yaml task: bigbench_arithmetic_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml b/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml index 9eca1362..254f115b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml @@ -1,4 +1,4 @@ # Generated 
by utils.py -dataset_name: ascii_word_recognition +dataset_name: ascii_word_recognition_zero_shot include: ../multiple_choice_template_yaml task: bigbench_ascii_word_recognition_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml b/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml index 0c49e8ee..4caeacd4 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: authorship_verification +dataset_name: authorship_verification_zero_shot include: ../multiple_choice_template_yaml task: bigbench_authorship_verification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml b/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml index 108cc802..16e62e69 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: auto_categorization +dataset_name: auto_categorization_zero_shot include: ../multiple_choice_template_yaml task: bigbench_auto_categorization_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml b/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml index 7ae0c2a5..72db1d8e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: auto_debugging +dataset_name: auto_debugging_zero_shot include: ../multiple_choice_template_yaml task: bigbench_auto_debugging_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml b/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml index 6cb2bff4..3c4be304 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: bbq_lite_json +dataset_name: bbq_lite_json_zero_shot include: ../multiple_choice_template_yaml task: bigbench_bbq_lite_json_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml b/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml index 33871759..73448ad9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: bridging_anaphora_resolution_barqa +dataset_name: bridging_anaphora_resolution_barqa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_bridging_anaphora_resolution_barqa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml b/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml index 340e9bda..1d09f2d4 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: causal_judgment +dataset_name: causal_judgment_zero_shot include: ../multiple_choice_template_yaml task: bigbench_causal_judgment_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml b/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml index 4b3dd1a6..c39ec278 
100644 --- a/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: cause_and_effect +dataset_name: cause_and_effect_zero_shot include: ../multiple_choice_template_yaml task: bigbench_cause_and_effect_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml b/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml index 000360c0..0a9883d0 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: checkmate_in_one +dataset_name: checkmate_in_one_zero_shot include: ../multiple_choice_template_yaml task: bigbench_checkmate_in_one_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml b/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml index b6d1f2e2..ea299797 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: chess_state_tracking +dataset_name: chess_state_tracking_zero_shot include: ../multiple_choice_template_yaml task: bigbench_chess_state_tracking_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml b/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml index 2552166c..c24d5761 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: chinese_remainder_theorem +dataset_name: chinese_remainder_theorem_zero_shot include: ../multiple_choice_template_yaml task: bigbench_chinese_remainder_theorem_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml b/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml index a03e56c0..f5918e60 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: cifar10_classification +dataset_name: cifar10_classification_zero_shot include: ../multiple_choice_template_yaml task: bigbench_cifar10_classification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml b/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml index f2a33424..9360f759 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: code_line_description +dataset_name: code_line_description_zero_shot include: ../multiple_choice_template_yaml task: bigbench_code_line_description_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml b/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml index c03dc365..5655ea1f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: codenames +dataset_name: codenames_zero_shot include: ../multiple_choice_template_yaml task: bigbench_codenames_multiple_choice diff --git 
a/lm_eval/tasks/bigbench/multiple_choice/color.yaml b/lm_eval/tasks/bigbench/multiple_choice/color.yaml index f49710c7..7350013f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/color.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/color.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: color +dataset_name: color_zero_shot include: ../multiple_choice_template_yaml task: bigbench_color_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml b/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml index 619c8eea..bf8f3aca 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: common_morpheme +dataset_name: common_morpheme_zero_shot include: ../multiple_choice_template_yaml task: bigbench_common_morpheme_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml b/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml index a7570bb0..3ee13b37 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: conceptual_combinations +dataset_name: conceptual_combinations_zero_shot include: ../multiple_choice_template_yaml task: bigbench_conceptual_combinations_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml b/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml index 4ff6ef02..e5a28097 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: conlang_translation +dataset_name: conlang_translation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_conlang_translation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml b/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml index 4b34eec8..3bf9d9bf 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: contextual_parametric_knowledge_conflicts +dataset_name: contextual_parametric_knowledge_conflicts_zero_shot include: ../multiple_choice_template_yaml task: bigbench_contextual_parametric_knowledge_conflicts_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml b/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml index 2b0b9d46..4aca69ad 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: crash_blossom +dataset_name: crash_blossom_zero_shot include: ../multiple_choice_template_yaml task: bigbench_crash_blossom_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml b/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml index c203459a..ac7c1820 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: crass_ai +dataset_name: crass_ai_zero_shot include: 
../multiple_choice_template_yaml task: bigbench_crass_ai_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml b/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml index c8cdd625..c187505d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: cryobiology_spanish +dataset_name: cryobiology_spanish_zero_shot include: ../multiple_choice_template_yaml task: bigbench_cryobiology_spanish_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml b/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml index 503cd601..c5e0519f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: cryptonite +dataset_name: cryptonite_zero_shot include: ../multiple_choice_template_yaml task: bigbench_cryptonite_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml b/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml index bb9d90b0..0b8e694c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: cs_algorithms +dataset_name: cs_algorithms_zero_shot include: ../multiple_choice_template_yaml task: bigbench_cs_algorithms_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml index cb00651a..3a77ea44 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: dark_humor_detection +dataset_name: dark_humor_detection_zero_shot include: ../multiple_choice_template_yaml task: bigbench_dark_humor_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml index 596a941e..2851f0bb 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: date_understanding +dataset_name: date_understanding_zero_shot include: ../multiple_choice_template_yaml task: bigbench_date_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml index 5264c21f..2827232a 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: disambiguation_qa +dataset_name: disambiguation_qa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_disambiguation_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml b/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml index 151616c2..5a18733f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: discourse_marker_prediction 
+dataset_name: discourse_marker_prediction_zero_shot include: ../multiple_choice_template_yaml task: bigbench_discourse_marker_prediction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml index 578df2a3..bf8494cf 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: disfl_qa +dataset_name: disfl_qa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_disfl_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml b/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml index 07ecf4d4..48d6f32e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: dyck_languages +dataset_name: dyck_languages_zero_shot include: ../multiple_choice_template_yaml task: bigbench_dyck_languages_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml index d9e41204..64cb58ff 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: elementary_math_qa +dataset_name: elementary_math_qa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_elementary_math_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml b/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml index f6528de7..0604d97d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: emoji_movie +dataset_name: emoji_movie_zero_shot include: ../multiple_choice_template_yaml task: bigbench_emoji_movie_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml b/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml index cedbd41c..ff648d9c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: emojis_emotion_prediction +dataset_name: emojis_emotion_prediction_zero_shot include: ../multiple_choice_template_yaml task: bigbench_emojis_emotion_prediction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml b/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml index 078a3c45..c848740b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: empirical_judgments +dataset_name: empirical_judgments_zero_shot include: ../multiple_choice_template_yaml task: bigbench_empirical_judgments_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml index 0dd3a6c6..8adc12e9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: 
english_proverbs +dataset_name: english_proverbs_zero_shot include: ../multiple_choice_template_yaml task: bigbench_english_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml index 12c7dae6..ed26147a 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: english_russian_proverbs +dataset_name: english_russian_proverbs_zero_shot include: ../multiple_choice_template_yaml task: bigbench_english_russian_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml index 336a013e..24444e55 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: entailed_polarity +dataset_name: entailed_polarity_zero_shot include: ../multiple_choice_template_yaml task: bigbench_entailed_polarity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml index 4d1bf0e8..32878c8b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: entailed_polarity_hindi +dataset_name: entailed_polarity_hindi_zero_shot include: ../multiple_choice_template_yaml task: bigbench_entailed_polarity_hindi_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml b/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml index 79827577..2c35581a 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: epistemic_reasoning +dataset_name: epistemic_reasoning_zero_shot include: ../multiple_choice_template_yaml task: bigbench_epistemic_reasoning_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml b/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml index f82cd899..b85acd95 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: evaluating_information_essentiality +dataset_name: evaluating_information_essentiality_zero_shot include: ../multiple_choice_template_yaml task: bigbench_evaluating_information_essentiality_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml b/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml index 2e20aabe..4fbed803 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: fact_checker +dataset_name: fact_checker_zero_shot include: ../multiple_choice_template_yaml task: bigbench_fact_checker_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml 
b/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml index e7931f2f..68a55e47 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: fantasy_reasoning +dataset_name: fantasy_reasoning_zero_shot include: ../multiple_choice_template_yaml task: bigbench_fantasy_reasoning_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml b/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml index 593c4860..39fcd9cf 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: few_shot_nlg +dataset_name: few_shot_nlg_zero_shot include: ../multiple_choice_template_yaml task: bigbench_few_shot_nlg_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml index 00f07670..68a83956 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: figure_of_speech_detection +dataset_name: figure_of_speech_detection_zero_shot include: ../multiple_choice_template_yaml task: bigbench_figure_of_speech_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml b/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml index b2eb5aca..7ff37fd7 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: formal_fallacies_syllogisms_negation +dataset_name: formal_fallacies_syllogisms_negation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_formal_fallacies_syllogisms_negation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/gem.yaml b/lm_eval/tasks/bigbench/multiple_choice/gem.yaml index 5fd4caae..bf81e880 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/gem.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/gem.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: gem +dataset_name: gem_zero_shot include: ../multiple_choice_template_yaml task: bigbench_gem_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml b/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml index 77d16864..39eee21a 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: gender_inclusive_sentences_german +dataset_name: gender_inclusive_sentences_german_zero_shot include: ../multiple_choice_template_yaml task: bigbench_gender_inclusive_sentences_german_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml b/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml index 021ad284..8083b869 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: 
general_knowledge +dataset_name: general_knowledge_zero_shot include: ../multiple_choice_template_yaml task: bigbench_general_knowledge_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml b/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml index cfc2ada2..7b80acbf 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: geometric_shapes +dataset_name: geometric_shapes_zero_shot include: ../multiple_choice_template_yaml task: bigbench_geometric_shapes_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml b/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml index e457887f..6413fb03 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: goal_step_wikihow +dataset_name: goal_step_wikihow_zero_shot include: ../multiple_choice_template_yaml task: bigbench_goal_step_wikihow_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml b/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml index 8ec630d5..53523c33 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: gre_reading_comprehension +dataset_name: gre_reading_comprehension_zero_shot include: ../multiple_choice_template_yaml task: bigbench_gre_reading_comprehension_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml b/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml index 94272e8a..c5e4f24a 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: hhh_alignment +dataset_name: hhh_alignment_zero_shot include: ../multiple_choice_template_yaml task: bigbench_hhh_alignment_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml b/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml index 0ab2cecd..ed1ed278 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: hindi_question_answering +dataset_name: hindi_question_answering_zero_shot include: ../multiple_choice_template_yaml task: bigbench_hindi_question_answering_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml b/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml index 2d49951b..321f7513 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: hindu_knowledge +dataset_name: hindu_knowledge_zero_shot include: ../multiple_choice_template_yaml task: bigbench_hindu_knowledge_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml b/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml index 4c7ca8bd..5dac090f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml +++ 
b/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: hinglish_toxicity +dataset_name: hinglish_toxicity_zero_shot include: ../multiple_choice_template_yaml task: bigbench_hinglish_toxicity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml b/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml index d04bccc9..2fef6d93 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: human_organs_senses +dataset_name: human_organs_senses_zero_shot include: ../multiple_choice_template_yaml task: bigbench_human_organs_senses_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml b/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml index 9e15ffac..34b37710 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: hyperbaton +dataset_name: hyperbaton_zero_shot include: ../multiple_choice_template_yaml task: bigbench_hyperbaton_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml b/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml index dfb75722..f716129d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: identify_math_theorems +dataset_name: identify_math_theorems_zero_shot include: ../multiple_choice_template_yaml task: bigbench_identify_math_theorems_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml b/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml index 3657d3b0..93c4c244 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: identify_odd_metaphor +dataset_name: identify_odd_metaphor_zero_shot include: ../multiple_choice_template_yaml task: bigbench_identify_odd_metaphor_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml b/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml index 8c2d4c81..9a26fd55 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: implicatures +dataset_name: implicatures_zero_shot include: ../multiple_choice_template_yaml task: bigbench_implicatures_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml b/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml index a837cdf3..9bb08442 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: implicit_relations +dataset_name: implicit_relations_zero_shot include: ../multiple_choice_template_yaml task: bigbench_implicit_relations_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml b/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml index 9d9cb82b..720ac92a 100644 --- 
a/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: intent_recognition +dataset_name: intent_recognition_zero_shot include: ../multiple_choice_template_yaml task: bigbench_intent_recognition_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml index 715582af..89d7742d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: international_phonetic_alphabet_nli +dataset_name: international_phonetic_alphabet_nli_zero_shot include: ../multiple_choice_template_yaml task: bigbench_international_phonetic_alphabet_nli_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml index cd6f6f71..c8e866e2 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: international_phonetic_alphabet_transliterate +dataset_name: international_phonetic_alphabet_transliterate_zero_shot include: ../multiple_choice_template_yaml task: bigbench_international_phonetic_alphabet_transliterate_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml b/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml index d6448572..6014a175 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: intersect_geometry +dataset_name: intersect_geometry_zero_shot include: ../multiple_choice_template_yaml task: bigbench_intersect_geometry_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml b/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml index bb8385f2..a19ff99e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: irony_identification +dataset_name: irony_identification_zero_shot include: ../multiple_choice_template_yaml task: bigbench_irony_identification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml b/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml index 7ba101c8..a90a8286 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: kanji_ascii +dataset_name: kanji_ascii_zero_shot include: ../multiple_choice_template_yaml task: bigbench_kanji_ascii_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml b/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml index e3767b21..910cec47 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: kannada 
+dataset_name: kannada_zero_shot include: ../multiple_choice_template_yaml task: bigbench_kannada_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml b/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml index 88c6bf5e..75a673c8 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: key_value_maps +dataset_name: key_value_maps_zero_shot include: ../multiple_choice_template_yaml task: bigbench_key_value_maps_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml b/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml index de972c64..1c5f6293 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: known_unknowns +dataset_name: known_unknowns_zero_shot include: ../multiple_choice_template_yaml task: bigbench_known_unknowns_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml b/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml index 3e17fd8f..07e2711b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: language_games +dataset_name: language_games_zero_shot include: ../multiple_choice_template_yaml task: bigbench_language_games_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml b/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml index e17cdc69..9ea141fb 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: language_identification +dataset_name: language_identification_zero_shot include: ../multiple_choice_template_yaml task: bigbench_language_identification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml b/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml index 118de388..50800d9d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: linguistic_mappings +dataset_name: linguistic_mappings_zero_shot include: ../multiple_choice_template_yaml task: bigbench_linguistic_mappings_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml b/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml index 4799e672..e269cd04 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: linguistics_puzzles +dataset_name: linguistics_puzzles_zero_shot include: ../multiple_choice_template_yaml task: bigbench_linguistics_puzzles_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml b/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml index f2c94ada..4f4f2ca1 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: 
list_functions +dataset_name: list_functions_zero_shot include: ../multiple_choice_template_yaml task: bigbench_list_functions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml b/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml index c24e71ac..da6a018f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: logic_grid_puzzle +dataset_name: logic_grid_puzzle_zero_shot include: ../multiple_choice_template_yaml task: bigbench_logic_grid_puzzle_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml index 11e2771e..84f55f64 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: logical_args +dataset_name: logical_args_zero_shot include: ../multiple_choice_template_yaml task: bigbench_logical_args_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml index 0de47251..592d2afa 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: logical_deduction +dataset_name: logical_deduction_zero_shot include: ../multiple_choice_template_yaml task: bigbench_logical_deduction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml index b4d68c1b..1c6411af 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: logical_fallacy_detection +dataset_name: logical_fallacy_detection_zero_shot include: ../multiple_choice_template_yaml task: bigbench_logical_fallacy_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml b/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml index e58224b9..65671894 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: logical_sequence +dataset_name: logical_sequence_zero_shot include: ../multiple_choice_template_yaml task: bigbench_logical_sequence_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml b/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml index 316b8eed..4ed0ad3c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: mathematical_induction +dataset_name: mathematical_induction_zero_shot include: ../multiple_choice_template_yaml task: bigbench_mathematical_induction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml b/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml index ebbc32f5..9facf639 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml +++ 
b/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: matrixshapes +dataset_name: matrixshapes_zero_shot include: ../multiple_choice_template_yaml task: bigbench_matrixshapes_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml b/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml index 2bbe0c00..7c476c4e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: metaphor_boolean +dataset_name: metaphor_boolean_zero_shot include: ../multiple_choice_template_yaml task: bigbench_metaphor_boolean_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml index ae0fab49..6661a54f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: metaphor_understanding +dataset_name: metaphor_understanding_zero_shot include: ../multiple_choice_template_yaml task: bigbench_metaphor_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml b/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml index 76b1bac0..67109c8c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: minute_mysteries_qa +dataset_name: minute_mysteries_qa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_minute_mysteries_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml b/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml index dce2a5c2..63d0fcda 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: misconceptions +dataset_name: misconceptions_zero_shot include: ../multiple_choice_template_yaml task: bigbench_misconceptions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml b/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml index fca2b324..f9c5db38 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: misconceptions_russian +dataset_name: misconceptions_russian_zero_shot include: ../multiple_choice_template_yaml task: bigbench_misconceptions_russian_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml b/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml index ac32701f..a1b091da 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: mnist_ascii +dataset_name: mnist_ascii_zero_shot include: ../multiple_choice_template_yaml task: bigbench_mnist_ascii_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml b/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml index fd5c271a..c8a23735 100644 --- 
a/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: modified_arithmetic +dataset_name: modified_arithmetic_zero_shot include: ../multiple_choice_template_yaml task: bigbench_modified_arithmetic_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml b/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml index 95414745..38295552 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: moral_permissibility +dataset_name: moral_permissibility_zero_shot include: ../multiple_choice_template_yaml task: bigbench_moral_permissibility_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml index 831b261a..89b93d9d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: movie_dialog_same_or_different +dataset_name: movie_dialog_same_or_different_zero_shot include: ../multiple_choice_template_yaml task: bigbench_movie_dialog_same_or_different_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml b/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml index 16d4ea55..7055028e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: movie_recommendation +dataset_name: movie_recommendation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_movie_recommendation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml b/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml index b7693b06..17b67bcc 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: mult_data_wrangling +dataset_name: mult_data_wrangling_zero_shot include: ../multiple_choice_template_yaml task: bigbench_mult_data_wrangling_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml b/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml index 8c954b5d..10ff48ea 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: multiemo +dataset_name: multiemo_zero_shot include: ../multiple_choice_template_yaml task: bigbench_multiemo_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml b/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml index 78d295c5..4874dd15 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: natural_instructions +dataset_name: natural_instructions_zero_shot include: ../multiple_choice_template_yaml task: bigbench_natural_instructions_multiple_choice diff --git 
a/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml b/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml index 3bc9f120..e69f2790 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: navigate +dataset_name: navigate_zero_shot include: ../multiple_choice_template_yaml task: bigbench_navigate_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml b/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml index 7a7b2d80..52d25bca 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: nonsense_words_grammar +dataset_name: nonsense_words_grammar_zero_shot include: ../multiple_choice_template_yaml task: bigbench_nonsense_words_grammar_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml b/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml index 04172c1a..3fc74aa9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: novel_concepts +dataset_name: novel_concepts_zero_shot include: ../multiple_choice_template_yaml task: bigbench_novel_concepts_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml b/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml index c6ab4011..277d843d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: object_counting +dataset_name: object_counting_zero_shot include: ../multiple_choice_template_yaml task: bigbench_object_counting_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml b/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml index 82d70a63..aaa43e67 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: odd_one_out +dataset_name: odd_one_out_zero_shot include: ../multiple_choice_template_yaml task: bigbench_odd_one_out_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/operators.yaml b/lm_eval/tasks/bigbench/multiple_choice/operators.yaml index e48c8005..951db6f9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/operators.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/operators.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: operators +dataset_name: operators_zero_shot include: ../multiple_choice_template_yaml task: bigbench_operators_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml b/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml index 3423101a..2cfc8283 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: paragraph_segmentation +dataset_name: paragraph_segmentation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_paragraph_segmentation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml 
b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml index a2f65cde..7a9b61fb 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: parsinlu_qa +dataset_name: parsinlu_qa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_parsinlu_qa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml index 3f0f6182..5fa0eccc 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: parsinlu_reading_comprehension +dataset_name: parsinlu_reading_comprehension_zero_shot include: ../multiple_choice_template_yaml task: bigbench_parsinlu_reading_comprehension_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml b/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml index ed4945f9..de024e2e 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: penguins_in_a_table +dataset_name: penguins_in_a_table_zero_shot include: ../multiple_choice_template_yaml task: bigbench_penguins_in_a_table_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml b/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml index 5adb9422..b7a644f9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: periodic_elements +dataset_name: periodic_elements_zero_shot include: ../multiple_choice_template_yaml task: bigbench_periodic_elements_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml b/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml index c0ee240f..6fa92ed3 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: persian_idioms +dataset_name: persian_idioms_zero_shot include: ../multiple_choice_template_yaml task: bigbench_persian_idioms_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml b/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml index 6231e5c0..c797aec6 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: phrase_relatedness +dataset_name: phrase_relatedness_zero_shot include: ../multiple_choice_template_yaml task: bigbench_phrase_relatedness_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml b/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml index 50353ac7..089376dd 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: physical_intuition +dataset_name: physical_intuition_zero_shot include: ../multiple_choice_template_yaml task: 
bigbench_physical_intuition_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/physics.yaml b/lm_eval/tasks/bigbench/multiple_choice/physics.yaml index f3b4244e..bc06f79d 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/physics.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/physics.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: physics +dataset_name: physics_zero_shot include: ../multiple_choice_template_yaml task: bigbench_physics_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml b/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml index ec5e9531..44646f14 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: physics_questions +dataset_name: physics_questions_zero_shot include: ../multiple_choice_template_yaml task: bigbench_physics_questions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml b/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml index a81f33b0..85aac7f4 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: play_dialog_same_or_different +dataset_name: play_dialog_same_or_different_zero_shot include: ../multiple_choice_template_yaml task: bigbench_play_dialog_same_or_different_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml b/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml index af82fce2..d61345fe 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: polish_sequence_labeling +dataset_name: polish_sequence_labeling_zero_shot include: ../multiple_choice_template_yaml task: bigbench_polish_sequence_labeling_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml b/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml index 83b733a3..71a56aa8 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: presuppositions_as_nli +dataset_name: presuppositions_as_nli_zero_shot include: ../multiple_choice_template_yaml task: bigbench_presuppositions_as_nli_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml b/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml index 5f52b44c..263d61eb 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: qa_wikidata +dataset_name: qa_wikidata_zero_shot include: ../multiple_choice_template_yaml task: bigbench_qa_wikidata_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml b/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml index 1b4301bb..3b3dd0d7 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: 
question_selection +dataset_name: question_selection_zero_shot include: ../multiple_choice_template_yaml task: bigbench_question_selection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml b/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml index d41cd5dd..8138791f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: real_or_fake_text +dataset_name: real_or_fake_text_zero_shot include: ../multiple_choice_template_yaml task: bigbench_real_or_fake_text_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml b/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml index e5e6f520..3ab6d5e0 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: reasoning_about_colored_objects +dataset_name: reasoning_about_colored_objects_zero_shot include: ../multiple_choice_template_yaml task: bigbench_reasoning_about_colored_objects_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml b/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml index 73406e70..666aa49b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: repeat_copy_logic +dataset_name: repeat_copy_logic_zero_shot include: ../multiple_choice_template_yaml task: bigbench_repeat_copy_logic_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml b/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml index b785712c..49e3cb4b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: rephrase +dataset_name: rephrase_zero_shot include: ../multiple_choice_template_yaml task: bigbench_rephrase_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml b/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml index e8aff5b3..93434e2c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: riddle_sense +dataset_name: riddle_sense_zero_shot include: ../multiple_choice_template_yaml task: bigbench_riddle_sense_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml b/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml index 7504f388..32c38ba3 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: ruin_names +dataset_name: ruin_names_zero_shot include: ../multiple_choice_template_yaml task: bigbench_ruin_names_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml b/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml index a462eb15..d930e741 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml +++ 
b/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: salient_translation_error_detection +dataset_name: salient_translation_error_detection_zero_shot include: ../multiple_choice_template_yaml task: bigbench_salient_translation_error_detection_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml b/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml index 5ea881cd..f23190e7 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: scientific_press_release +dataset_name: scientific_press_release_zero_shot include: ../multiple_choice_template_yaml task: bigbench_scientific_press_release_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml index 886b61be..00574b2f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: semantic_parsing_in_context_sparc +dataset_name: semantic_parsing_in_context_sparc_zero_shot include: ../multiple_choice_template_yaml task: bigbench_semantic_parsing_in_context_sparc_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml index cb5dc922..a988e54c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: semantic_parsing_spider +dataset_name: semantic_parsing_spider_zero_shot include: ../multiple_choice_template_yaml task: bigbench_semantic_parsing_spider_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml b/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml index 573f6199..4e4a18f1 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: sentence_ambiguity +dataset_name: sentence_ambiguity_zero_shot include: ../multiple_choice_template_yaml task: bigbench_sentence_ambiguity_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml b/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml index 1e0c4ffb..82b86d1b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: similarities_abstraction +dataset_name: similarities_abstraction_zero_shot include: ../multiple_choice_template_yaml task: bigbench_similarities_abstraction_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml b/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml index 2e453821..7b1849d5 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: 
simp_turing_concept +dataset_name: simp_turing_concept_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simp_turing_concept_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml index e5e24f58..cd1b61b9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_arithmetic_json +dataset_name: simple_arithmetic_json_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simple_arithmetic_json_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml index 4fb67ac5..4e63fce9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_arithmetic_json_multiple_choice +dataset_name: simple_arithmetic_json_multiple_choice_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simple_arithmetic_json_multiple_choice_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml index 67853d68..8688512b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_arithmetic_json_subtasks +dataset_name: simple_arithmetic_json_subtasks_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simple_arithmetic_json_subtasks_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml index b76bfbde..685ec17c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_arithmetic_multiple_targets_json +dataset_name: simple_arithmetic_multiple_targets_json_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simple_arithmetic_multiple_targets_json_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml index a8a10ca6..0983381b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: simple_ethical_questions +dataset_name: simple_ethical_questions_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simple_ethical_questions_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml b/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml index 3bbecfb9..13b67888 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml @@ -1,4 +1,4 @@ # Generated 
by utils.py -dataset_name: simple_text_editing +dataset_name: simple_text_editing_zero_shot include: ../multiple_choice_template_yaml task: bigbench_simple_text_editing_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml b/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml index 4e0b9d3a..3e79f1ce 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: snarks +dataset_name: snarks_zero_shot include: ../multiple_choice_template_yaml task: bigbench_snarks_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml b/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml index de12bcbd..a4da50c9 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: social_iqa +dataset_name: social_iqa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_social_iqa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml b/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml index f2e8c795..1b3bd593 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: social_support +dataset_name: social_support_zero_shot include: ../multiple_choice_template_yaml task: bigbench_social_support_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml b/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml index 4a3914a4..e5a123fc 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: sports_understanding +dataset_name: sports_understanding_zero_shot include: ../multiple_choice_template_yaml task: bigbench_sports_understanding_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml b/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml index f0882aa2..30877750 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: strange_stories +dataset_name: strange_stories_zero_shot include: ../multiple_choice_template_yaml task: bigbench_strange_stories_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml b/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml index e99618c0..f988071b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: strategyqa +dataset_name: strategyqa_zero_shot include: ../multiple_choice_template_yaml task: bigbench_strategyqa_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml b/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml index 56af1ae2..f53d677c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: sufficient_information +dataset_name: sufficient_information_zero_shot include: 
../multiple_choice_template_yaml task: bigbench_sufficient_information_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml b/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml index 5c6f0cd2..ecf7465f 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: suicide_risk +dataset_name: suicide_risk_zero_shot include: ../multiple_choice_template_yaml task: bigbench_suicide_risk_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml index 497980ae..40103274 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: swahili_english_proverbs +dataset_name: swahili_english_proverbs_zero_shot include: ../multiple_choice_template_yaml task: bigbench_swahili_english_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml b/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml index 46d49ddc..d2f31d3c 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: swedish_to_german_proverbs +dataset_name: swedish_to_german_proverbs_zero_shot include: ../multiple_choice_template_yaml task: bigbench_swedish_to_german_proverbs_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml b/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml index a6032ad9..98e3d5b3 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: symbol_interpretation +dataset_name: symbol_interpretation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_symbol_interpretation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml b/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml index 4a63b2ac..abd8834b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: temporal_sequences +dataset_name: temporal_sequences_zero_shot include: ../multiple_choice_template_yaml task: bigbench_temporal_sequences_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/tense.yaml b/lm_eval/tasks/bigbench/multiple_choice/tense.yaml index 4fce296d..6a2676f0 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/tense.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/tense.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: tense +dataset_name: tense_zero_shot include: ../multiple_choice_template_yaml task: bigbench_tense_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml b/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml index 550d1190..350d4e78 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: timedial +dataset_name: 
timedial_zero_shot include: ../multiple_choice_template_yaml task: bigbench_timedial_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml b/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml index 232dc706..b9a03639 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: topical_chat +dataset_name: topical_chat_zero_shot include: ../multiple_choice_template_yaml task: bigbench_topical_chat_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml b/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml index 8dd68282..f9aa366b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: tracking_shuffled_objects +dataset_name: tracking_shuffled_objects_zero_shot include: ../multiple_choice_template_yaml task: bigbench_tracking_shuffled_objects_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml b/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml index d85d63b1..263793af 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: understanding_fables +dataset_name: understanding_fables_zero_shot include: ../multiple_choice_template_yaml task: bigbench_understanding_fables_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml b/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml index 0e92a41f..f7e1feb0 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: undo_permutation +dataset_name: undo_permutation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_undo_permutation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml b/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml index b4d421e2..21a67c43 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: unit_conversion +dataset_name: unit_conversion_zero_shot include: ../multiple_choice_template_yaml task: bigbench_unit_conversion_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml b/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml index eb60bc42..68614cfd 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: unit_interpretation +dataset_name: unit_interpretation_zero_shot include: ../multiple_choice_template_yaml task: bigbench_unit_interpretation_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml b/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml index 47c5b755..45943005 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml +++ 
b/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: unnatural_in_context_learning +dataset_name: unnatural_in_context_learning_zero_shot include: ../multiple_choice_template_yaml task: bigbench_unnatural_in_context_learning_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml b/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml index 3ddb5e69..84305bf3 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: vitaminc_fact_verification +dataset_name: vitaminc_fact_verification_zero_shot include: ../multiple_choice_template_yaml task: bigbench_vitaminc_fact_verification_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml b/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml index dda9a695..7879d166 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: what_is_the_tao +dataset_name: what_is_the_tao_zero_shot include: ../multiple_choice_template_yaml task: bigbench_what_is_the_tao_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml b/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml index a6a5bbbf..3dbfb030 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: which_wiki_edit +dataset_name: which_wiki_edit_zero_shot include: ../multiple_choice_template_yaml task: bigbench_which_wiki_edit_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml b/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml index 0b0a858c..98bc6e4b 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: winowhy +dataset_name: winowhy_zero_shot include: ../multiple_choice_template_yaml task: bigbench_winowhy_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml b/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml index c244f547..71e79ae3 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: word_sorting +dataset_name: word_sorting_zero_shot include: ../multiple_choice_template_yaml task: bigbench_word_sorting_multiple_choice diff --git a/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml b/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml index a993ef33..bbfeb144 100644 --- a/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml +++ b/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml @@ -1,4 +1,4 @@ # Generated by utils.py -dataset_name: word_unscrambling +dataset_name: word_unscrambling_zero_shot include: ../multiple_choice_template_yaml task: bigbench_word_unscrambling_multiple_choice -- GitLab From fa608798a8c5427a4d08aae03a7c8fa42ccba48f Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 3 Oct 2023 20:13:43 +0000 Subject: [PATCH 249/264] add bigbench push_to_hub script --- 
.../tasks/bigbench/push_bigbench_dataset.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 lm_eval/tasks/bigbench/push_bigbench_dataset.py diff --git a/lm_eval/tasks/bigbench/push_bigbench_dataset.py b/lm_eval/tasks/bigbench/push_bigbench_dataset.py new file mode 100644 index 00000000..93c0b6c0 --- /dev/null +++ b/lm_eval/tasks/bigbench/push_bigbench_dataset.py @@ -0,0 +1,32 @@ +""" +A utility script that pushes all Bigbench subtasks from their form in the `bigbench` HF dataset +into `{org name}/bigbench`. + +Prior to running, must log into HF Hub for the target HF hub org via `huggingface-cli login`. + +Requires the installation of +`pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"` +and is included so that the bigbench dependency can be avoided. +""" +from tqdm import tqdm +import datasets + +import bigbench.api.util as bb_utils + + +all_task_names = bb_utils.get_all_json_task_names() + +num_shots = [0] + +for shots in num_shots: + for task_name in tqdm(all_task_names[29:]): + try: + print(f"Loading '{task_name}' with num_shots={shots}...") + task_ds = datasets.load_dataset("bigbench", name=task_name, num_shots=shots) + + print(f"Pushing '{task_name}' with num_shots={shots}...") + task_ds.push_to_hub("hails/bigbench", task_name + "_zero_shot") + + del task_ds + except Exception as e: + raise e \ No newline at end of file -- GitLab From 67c0f73a9493282c8bda3f9b8a053bc775cd2b30 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Tue, 3 Oct 2023 20:14:11 +0000 Subject: [PATCH 250/264] add bigbench push_to_hub script --- lm_eval/tasks/bigbench/push_bigbench_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/bigbench/push_bigbench_dataset.py b/lm_eval/tasks/bigbench/push_bigbench_dataset.py index 93c0b6c0..acc37465 100644 --- a/lm_eval/tasks/bigbench/push_bigbench_dataset.py +++ b/lm_eval/tasks/bigbench/push_bigbench_dataset.py @@ -19,7 +19,7 @@ all_task_names = bb_utils.get_all_json_task_names() num_shots = [0] for shots in num_shots: - for task_name in tqdm(all_task_names[29:]): + for task_name in tqdm(all_task_names): try: print(f"Loading '{task_name}' with num_shots={shots}...") task_ds = datasets.load_dataset("bigbench", name=task_name, num_shots=shots) -- GitLab From 7e6e3c200a48fa0c586c7ad9006324b0e05267aa Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 00:54:59 +0000 Subject: [PATCH 251/264] modify default yamls --- lm_eval/tasks/bigbench/greedy_until_template_yaml | 3 +++ lm_eval/tasks/bigbench/multiple_choice_template_yaml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lm_eval/tasks/bigbench/greedy_until_template_yaml b/lm_eval/tasks/bigbench/greedy_until_template_yaml index db975306..3de59b2b 100644 --- a/lm_eval/tasks/bigbench/greedy_until_template_yaml +++ b/lm_eval/tasks/bigbench/greedy_until_template_yaml @@ -1,6 +1,9 @@ group: bigbench dataset_path: hails/bigbench output_type: greedy_until +dataset_kwargs: + # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods + # subtask_name: null test_split: default doc_to_text: inputs doc_to_target: "{{targets[0]}}" diff --git a/lm_eval/tasks/bigbench/multiple_choice_template_yaml b/lm_eval/tasks/bigbench/multiple_choice_template_yaml index 6211f5b3..3a8185e8 100644 --- a/lm_eval/tasks/bigbench/multiple_choice_template_yaml +++ b/lm_eval/tasks/bigbench/multiple_choice_template_yaml @@ -1,7 +1,7 
@@ group: bigbench dataset_path: hails/bigbench dataset_kwargs: - num_shots: 0 # TODO: num of shots should be controlled through this, not through the typical methods + # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods # subtask_name: null output_type: multiple_choice test_split: default -- GitLab From 6f92c20da4b980230191bc0aea87dee07b640673 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Wed, 4 Oct 2023 06:46:45 +0000 Subject: [PATCH 252/264] moved main to lm_eval/ to fix import of main error --- main.py => lm_eval/main.py | 0 pyproject.toml | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename main.py => lm_eval/main.py (100%) diff --git a/main.py b/lm_eval/main.py similarity index 100% rename from main.py rename to lm_eval/main.py diff --git a/pyproject.toml b/pyproject.toml index 8fa30cdc..525285d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,8 +47,8 @@ lm_eval = ["**/*.yaml", "tasks/**/*"] examples = ["**/*.yaml"] [project.scripts] -lm-eval = "main:main" -lm_eval = "main:main" +lm-eval = "lm_eval.main:main" +lm_eval = "lm_eval.main:main" [project.urls] Homepage = "https://github.com/EleutherAI/lm-evaluation-harness" -- GitLab From 3b5e554ffdf51506820e73972ba4373d3348516c Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 14:09:48 +0000 Subject: [PATCH 253/264] move to __main__.py --- lm_eval/{main.py => __main__.py} | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) rename lm_eval/{main.py => __main__.py} (95%) diff --git a/lm_eval/main.py b/lm_eval/__main__.py similarity index 95% rename from lm_eval/main.py rename to lm_eval/__main__.py index 96207884..edfb5d2c 100644 --- a/lm_eval/main.py +++ b/lm_eval/__main__.py @@ -12,10 +12,9 @@ from lm_eval.api.registry import ALL_TASKS from lm_eval.logger import eval_logger, SPACING from lm_eval.tasks import include_task_folder -os.environ["TOKENIZERS_PARALLELISM"] = "false" +from typing import Union - -def parse_args() -> argparse.Namespace: +def parse_eval_args() -> argparse.Namespace: parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("--model", required=True, help="Name of model e.g. 
`hf`") parser.add_argument( @@ -100,8 +99,13 @@ def parse_args() -> argparse.Namespace: return parser.parse_args() -def main() -> None: - args = parse_args() +def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None: + + if not args: + # we allow for args to be passed externally, else we parse them ourselves + args = parse_eval_args() + + os.environ["TOKENIZERS_PARALLELISM"] = "false" if args.limit: eval_logger.warning( @@ -212,5 +216,5 @@ def main() -> None: print(evaluator.make_table(results, "groups")) -if __name__ == "__main__": - main() +if __name__ == "__main__": + cli_evaluate() -- GitLab From 2e13caa64ce5a5195ceb81d7bb834df36f852807 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 14:11:06 +0000 Subject: [PATCH 254/264] update commandline script --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 525285d0..df668fb3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,8 +47,8 @@ lm_eval = ["**/*.yaml", "tasks/**/*"] examples = ["**/*.yaml"] [project.scripts] -lm-eval = "lm_eval.main:main" -lm_eval = "lm_eval.main:main" +lm-eval = "lm_eval.__main__:cli_evaluate" +lm_eval = "lm_eval.__main__:cli_evaluate" [project.urls] Homepage = "https://github.com/EleutherAI/lm-evaluation-harness" -- GitLab From 0856828fc3d8d0638570644f424156d68be119e1 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 14:16:35 +0000 Subject: [PATCH 255/264] remove references to main.py --- docs/decontamination.md | 4 ++-- docs/interface.md | 2 +- docs/model_guide.md | 4 ++-- docs/new_task_guide.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/decontamination.md b/docs/decontamination.md index 609303dd..f37f1fc1 100644 --- a/docs/decontamination.md +++ b/docs/decontamination.md @@ -2,11 +2,11 @@ ## Usage -Simply add a "--decontamination_ngrams_path" when running main.py. The provided directory should contain +Simply add a "--decontamination_ngrams_path" when running \__main\__.py. The provided directory should contain the ngram files and info.json produced in "Pile Ngram Generation" further down. ```bash -python main.py \ +python -m lm_eval \ --model gpt2 \ --device 0 \ --tasks sciq \ diff --git a/docs/interface.md b/docs/interface.md index 029c635d..860dd1c0 100644 --- a/docs/interface.md +++ b/docs/interface.md @@ -4,7 +4,7 @@ This document details the interface exposed by `lm-eval` and provides details on ## Command-line Interface -A majority of users run the library by cloning it from Github and running the `main.py` script. +A majority of users run the library by cloning it from Github, installing the package as editable, and running the `python -m lm_eval` script. Equivalently, running the library can be done via the `lm-eval` entrypoint at the command line. diff --git a/docs/model_guide.md b/docs/model_guide.md index 8ffbf609..cf79dd77 100644 --- a/docs/model_guide.md +++ b/docs/model_guide.md @@ -70,9 +70,9 @@ smth smth tokenizer-agnostic Congrats on implementing your model! Now it's time to test it out. -To make your model usable via the command line interface to `lm-eval` using `main.py`, you'll need to tell `lm-eval` what your model's name is. +To make your model usable via the command line interface to `lm-eval` using `python -m lm_eval`, you'll need to tell `lm-eval` what your model's name is. -This is done via a *decorator*, `lm_eval.api.registry.register_model`. 
Using `register_model()`, one can both tell the package what the model's name(s) to be used are when invoking it with `python main.py --model <name>` and alert `lm-eval` to the model's existence.
+This is done via a *decorator*, `lm_eval.api.registry.register_model`. Using `register_model()`, one can both tell the package what name(s) the model should be invoked with via `python -m lm_eval --model <name>` and alert `lm-eval` to the model's existence.
 
 ```python
 from lm_eval.api.registry import register_model
diff --git a/docs/new_task_guide.md b/docs/new_task_guide.md
index 54745f47..cdbc8655 100644
--- a/docs/new_task_guide.md
+++ b/docs/new_task_guide.md
@@ -258,7 +258,7 @@ You can do this via adding the Python snippet
 
 from lm_eval.tasks import include_task_folder
 include_task_folder("/path/to/yaml/parent/folder")
 ```
-to the top of any Python file that is run or imported when performing evaluation, such as `main.py`.
+to the top of any Python file that is run or imported when performing evaluation, such as `__main__.py`.
 
 Passing `--tasks /path/to/yaml/file` is also accepted.
 
--
GitLab


From 9b06de5498c37e76b82a75f9a775530b382848f9 Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Wed, 4 Oct 2023 14:16:41 +0000
Subject: [PATCH 256/264] remove references to main.py

---
 README.md | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index cfd0c75d..6e22436c 100644
--- a/README.md
+++ b/README.md
@@ -9,8 +9,8 @@ We’d like your help to test it out! You can help by:
 
 2. Porting tasks supported in the previous version of the harness to the new YAML configuration format. Please check out our [task implementation guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/docs/new_task_guide.md) for more information.
 
 If you choose to port a task not yet completed according to [our checklist](https://github.com/EleutherAI/lm-evaluation-harness/blob/big-refactor/lm_eval/tasks/README.md), then you can contribute it by opening a PR containing [Refactor] in the name with:
-- A command of the form `python main.py --model hf --model_args ..... --tasks ...` which will run the task in the `master` branch, and what the score is
-- A command of the form `python main.py --model hf --model_args ..... --tasks ...` to run the task in your PR branch to `big-refactor`, and what the resulting score is, to show that we achieve equality between the two implementations.
+- A command of the form `python -m lm_eval --model hf --model_args ..... --tasks ...` which will run the task in the `master` branch, and what the score is
+- A command of the form `python -m lm_eval --model hf --model_args ..... --tasks ...` to run the task in your PR branch to `big-refactor`, and what the resulting score is, to show that we achieve equality between the two implementations.
 
 Lastly, we'll no longer be accepting new feature requests beyond those that are already open to the master branch as we carry out this switch to the new version over the next week, though we will be accepting bugfixes to the `master` branch and PRs to `big-refactor`. Feel free to reach out in the #lm-thunderdome channel of the EAI discord for more information.
@@ -67,7 +67,7 @@ To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/model
 
 ```bash
-python main.py \
+python -m lm_eval \
     --model hf \
     --model_args pretrained=EleutherAI/gpt-j-6B \
     --tasks hellaswag \
@@ -78,7 +78,7 @@ python main.py \
 Additional arguments can be provided to the model constructor using the `--model_args` flag. Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or to specify the datatype for running a model:
 
 ```bash
-python main.py \
+python -m lm_eval \
     --model hf \
     --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
     --tasks lambada_openai,hellaswag \
@@ -91,7 +91,7 @@ Models that are loaded via either `transformers.AutoModelForCausalLM` (autoregre
 
 Batch size selection can be automated by setting the ```--batch_size``` flag to ```auto```. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append ```:N``` to the above flag to automatically recompute the largest batch size ```N``` times. For example, to recompute the batch size 4 times, the command would be:
 
 ```bash
-python main.py \
+python -m lm_eval \
     --model hf \
     --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
     --tasks lambada_openai,hellaswag \
@@ -99,7 +99,7 @@ python main.py \
     --batch_size auto:4
 ```
 
-Alternatively, you can use `lm-eval` instead of `python main.py` to call lm eval from anywhere.
+Alternatively, you can use `lm-eval` or `lm_eval` instead of `python -m lm_eval` to call the harness from anywhere.
 
 ### Multi-GPU Evaluation with Hugging Face `accelerate`
 
@@ -108,7 +108,7 @@ To parallelize evaluation of HuggingFace models across multiple GPUs, we allow f
 
 The first is performed by launching evaluation via the `accelerate` library as follows:
 
 ```
-accelerate launch main.py \
+accelerate launch -m lm_eval \
     --model hf \
     --tasks lambada_openai,arc_easy \
     --batch_size 16 \
@@ -121,7 +121,7 @@ If your model *is too large to be run on a single one of your GPUs* then you
 
 We also provide a second method to run these large models: use of the `parallelize` argument.
 
 ```
-python main.py \
+python -m lm_eval \
     --model hf \
     --model_args pretrained=EleutherAI/pythia-12b,parallelize=True \
     --tasks lambada_openai,arc_easy \
@@ -136,7 +136,7 @@ To pass even more advanced keyword arguments to `accelerate`, we allow for the f
 
 Note that this method naively splits models across GPUs, resulting in only a single GPU performing work at any point in time, and so is much slower than launching with `accelerate launch`, possibly by a factor of the total # of GPUs.
 
-**Note that this option requires launching evaluation via `python main.py` rather than `accelerate launch main.py`.**
+**Note that this option requires launching evaluation via `python -m lm_eval` rather than `accelerate launch -m lm_eval`.**
 
 To use `accelerate` with the `lm-eval` command, use
 ```
@@ -167,7 +167,7 @@ Our library supports language models served via the OpenAI Completions API as fo
 
 ```bash
 export OPENAI_API_SECRET_KEY=YOUR_KEY_HERE
-python main.py \
+python -m lm_eval \
     --model openai-completions \
     --model_args engine=davinci \
     --tasks lambada_openai,hellaswag
@@ -198,7 +198,7 @@ This will write out one text file for each task.
To verify the data integrity of the tasks you're performing in addition to running the tasks themselves, you can use the `--check_integrity` flag: ```bash -python main.py \ +python -m lm_eval \ --model openai \ --model_args engine=davinci \ --tasks lambada_openai,hellaswag \ @@ -209,7 +209,7 @@ python main.py \ For models loaded with the HuggingFace `transformers` library, any arguments provided via `--model_args` get passed to the relevant constructor directly. This means that anything you can do with `AutoModel` can be done with our library. For example, you can pass a local path via `pretrained=` or use models finetuned with [PEFT](https://github.com/huggingface/peft) by taking the call you would run to evaluate the base model and add `,peft=PATH` to the `model_args` argument: ```bash -python main.py \ +python -m lm_eval \ --model hf \ --model_args pretrained=EleutherAI/gpt-j-6b,parallelize=True,load_in_4bit=True,peft=nomic-ai/gpt4all-j-lora \ --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq \ @@ -219,7 +219,7 @@ python main.py \ [GPTQ](https://github.com/PanQiWei/AutoGPTQ) quantized models can be loaded by specifying their file names in `,gptq=NAME` (or `,gptq=True` for default names) in the `model_args` argument: ```bash -python main.py \ +python -m lm_eval \ --model hf \ --model_args pretrained=model-name-or-path,gptq=model.safetensors,gptq_use_triton=True \ --tasks hellaswag -- GitLab From 436b2697f70cef7b7cd836a2359b13918f2b6531 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 14:17:01 +0000 Subject: [PATCH 257/264] also, reenable CPU tests --- .github/workflows/new_tasks.yml | 136 +++++++++++++++---------------- .github/workflows/unit_tests.yml | 64 +++++++-------- 2 files changed, 100 insertions(+), 100 deletions(-) diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml index fb406988..ebb28a30 100644 --- a/.github/workflows/new_tasks.yml +++ b/.github/workflows/new_tasks.yml @@ -1,72 +1,72 @@ -# name: Tasks Modified +name: Tasks Modified -# on: -# push: -# branches: -# - 'big-refactor*' -# pull_request: -# branches: -# - 'big-refactor*' -# workflow_dispatch: -# # comment/edit out the above to stop/change the triggers -# jobs: -# changed_files: -# runs-on: ubuntu-latest # windows-latest || macos-latest -# timeout-minutes: 120 -# name: Scan for changed tasks -# steps: -# - name: checkout -# uses: actions/checkout@v3 -# with: -# fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. +on: + push: + branches: + - 'big-refactor*' + pull_request: + branches: + - 'big-refactor*' + workflow_dispatch: +# comment/edit out the above to stop/change the triggers +jobs: + changed_files: + runs-on: ubuntu-latest # windows-latest || macos-latest + timeout-minutes: 120 + name: Scan for changed tasks + steps: + - name: checkout + uses: actions/checkout@v3 + with: + fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. -# # Uses the tj-actions/changed-files@v37 action to check for changes. -# # Outputs provided here: https://github.com/tj-actions/changed-files#outputs -# # The `files_yaml` input optionally takes a yaml string to specify filters, -# # and prepends the filter name to the standard output names. 
-# - name: Check task folders -# id: changed-tasks -# uses: tj-actions/changed-files@v37.1.2 -# with: -# # tasks checks the tasks folder and api checks the api folder for changes -# files_yaml: | -# tasks: -# - lm_eval/tasks/** -# api: -# - lm_eval/api/** -# write_output_files: true + # Uses the tj-actions/changed-files@v37 action to check for changes. + # Outputs provided here: https://github.com/tj-actions/changed-files#outputs + # The `files_yaml` input optionally takes a yaml string to specify filters, + # and prepends the filter name to the standard output names. + - name: Check task folders + id: changed-tasks + uses: tj-actions/changed-files@v37.1.2 + with: + # tasks checks the tasks folder and api checks the api folder for changes + files_yaml: | + tasks: + - lm_eval/tasks/** + api: + - lm_eval/api/** + write_output_files: true -# # The next step is optional; the files are written to the workspace by default (above). -# # so it's just for debugging -# - name: Run Tests -# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' -# run: | -# echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV' -# echo "One or more test file(s) has changed." -# echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}" + # The next step is optional; the files are written to the workspace by default (above). + # so it's just for debugging + - name: Run Tests + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' + run: | + echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV' + echo "One or more test file(s) has changed." + echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}" -# - name: Set up Python 3.9 -# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' -# uses: actions/setup-python@v4 -# with: -# python-version: 3.9 -# cache: 'pip' -# cache-dependency-path: setup.py -# - name: Install dependencies -# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' -# run: | -# python -m pip install --upgrade pip -# pip install -e '.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu -# # Install optional git dependencies -# # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt -# # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi -# - name: Test with pytest -# # if new tasks are added, run tests on them -# if: steps.changed-tasks.outputs.tasks_any_modified == 'true' -# run: python -m pytest tests/test_tasks.py -s -vv -# # if api is modified, run tests on it -# - name: Test more tasks with pytest -# env: -# API: true -# if: steps.changed-tasks.outputs.api_any_modified == 'true' -# run: python -m pytest tests/test_tasks.py -s -vv + - name: Set up Python 3.9 + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' + uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: 'pip' + cache-dependency-path: setup.py + - name: Install dependencies + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' + run: | + python -m pip install --upgrade pip + pip install -e 
'.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu
+        # Install optional git dependencies
+        # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
+        # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+      - name: Test with pytest
+        # if new tasks are added, run tests on them
+        if: steps.changed-tasks.outputs.tasks_any_modified == 'true'
+        run: python -m pytest tests/test_tasks.py -s -vv
+      # if api is modified, run tests on it
+      - name: Test more tasks with pytest
+        env:
+          API: true
+        if: steps.changed-tasks.outputs.api_any_modified == 'true'
+        run: python -m pytest tests/test_tasks.py -s -vv
diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index c56347dd..4f105d09 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -43,35 +43,35 @@ jobs:
 #   # mypy turned off for now
 #   - name: Lint with mypy
 #     run: mypy . --ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable
-# Job 2
-#  testcpu:
-#    name: CPU Tests
-#    runs-on: ubuntu-latest
-#    strategy:
-#      matrix:
-#        python-version: [ "3.8", "3.9", "3.10", "3.11" ]
-#    timeout-minutes: 30
-#    steps:
-#    - name: Checkout Code
-#      uses: actions/checkout@v3
-#    - name: Set up Python ${{ matrix.python-version }}
-#      uses: actions/setup-python@v4
-#      with:
-#        python-version: ${{ matrix.python-version }}
-#        cache: pip
-#        cache-dependency-path: setup.py
-#    - name: Install dependencies
-#      run: |
-#        python -m pip install --upgrade pip
-#        pip install -e '.[testing,anthropic,sentencepiece]' --extra-index-url https://download.pytorch.org/whl/cpu
-#      # Install optional git dependencies
-#      # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
-#      # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
-#    - name: Test with pytest
-#      run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/tests_master --ignore=tests/extra
-#    - name: Archive artifacts
-#      uses: actions/upload-artifact@v3
-#      with:
-#        name: output_results
-#        path: |
-#          test_logs/*
+# Job 2
+  testcpu:
+    name: CPU Tests
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: [ "3.8", "3.9", "3.10", "3.11" ]
+    timeout-minutes: 30
+    steps:
+    - name: Checkout Code
+      uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v4
+      with:
+        python-version: ${{ matrix.python-version }}
+        cache: pip
+        cache-dependency-path: setup.py
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install -e '.[testing,anthropic,sentencepiece]' --extra-index-url https://download.pytorch.org/whl/cpu
+# Install optional git dependencies
+# pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
+# if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+    - name: Test with pytest
+      run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/tests_master --ignore=tests/extra
+    - name: Archive artifacts
+      uses: actions/upload-artifact@v3
+      with:
+        name: output_results
+        path: |
+          test_logs/*
--
GitLab


From 54498fcc65167f2afb71a348a6deaf7bfd927bca Mon Sep 17 00:00:00 2001
From: haileyschoelkopf
Date: Wed, 4 Oct 2023 14:35:31 +0000
Subject: [PATCH 258/264] change back to original dataset path

---
 lm_eval/tasks/bigbench/greedy_until_template_yaml     | 2 +-
lm_eval/tasks/bigbench/multiple_choice_template_yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/bigbench/greedy_until_template_yaml b/lm_eval/tasks/bigbench/greedy_until_template_yaml index 3de59b2b..130500cc 100644 --- a/lm_eval/tasks/bigbench/greedy_until_template_yaml +++ b/lm_eval/tasks/bigbench/greedy_until_template_yaml @@ -1,5 +1,5 @@ group: bigbench -dataset_path: hails/bigbench +dataset_path: bigbench # will switch to `hails/bigbench` when all tasks are pushed output_type: greedy_until dataset_kwargs: # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods diff --git a/lm_eval/tasks/bigbench/multiple_choice_template_yaml b/lm_eval/tasks/bigbench/multiple_choice_template_yaml index 3a8185e8..3de7b5b7 100644 --- a/lm_eval/tasks/bigbench/multiple_choice_template_yaml +++ b/lm_eval/tasks/bigbench/multiple_choice_template_yaml @@ -1,5 +1,5 @@ group: bigbench -dataset_path: hails/bigbench +dataset_path: bigbench # will switch to `hails/bigbench` when all tasks are pushed dataset_kwargs: # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods # subtask_name: null -- GitLab From 51a43a768ea527e9313fb28005c59bdd936e862c Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 14:46:10 +0000 Subject: [PATCH 259/264] change back to original dataset path --- lm_eval/tasks/bigbench/push_bigbench_dataset.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/bigbench/push_bigbench_dataset.py b/lm_eval/tasks/bigbench/push_bigbench_dataset.py index acc37465..7566a664 100644 --- a/lm_eval/tasks/bigbench/push_bigbench_dataset.py +++ b/lm_eval/tasks/bigbench/push_bigbench_dataset.py @@ -2,9 +2,9 @@ A utility script that pushes all Bigbench subtasks from their form in the `bigbench` HF dataset into `{org name}/bigbench`. -Prior to running, must log into HF Hub for the target HF hub org via `huggingface-cli login`. +Prior to running, log into HF Hub for the target HF hub org via `huggingface-cli login`. -Requires the installation of +Requires the installation of `pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"` and is included so that the bigbench dependency can be avoided. """ @@ -20,7 +20,7 @@ num_shots = [0] for shots in num_shots: for task_name in tqdm(all_task_names): - try: + try: print(f"Loading '{task_name}' with num_shots={shots}...") task_ds = datasets.load_dataset("bigbench", name=task_name, num_shots=shots) @@ -29,4 +29,4 @@ for shots in num_shots: del task_ds except Exception as e: - raise e \ No newline at end of file + raise e -- GitLab From 09d935eeb3e0c21c095d5ca1df5f711746196d46 Mon Sep 17 00:00:00 2001 From: haileyschoelkopf Date: Wed, 4 Oct 2023 21:49:20 +0000 Subject: [PATCH 260/264] fix wildcards --- lm_eval/__main__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lm_eval/__main__.py b/lm_eval/__main__.py index edfb5d2c..2eb191ef 100644 --- a/lm_eval/__main__.py +++ b/lm_eval/__main__.py @@ -14,6 +14,7 @@ from lm_eval.tasks import include_task_folder from typing import Union + def parse_eval_args() -> argparse.Namespace: parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("--model", required=True, help="Name of model e.g. 
`hf`") @@ -136,8 +137,6 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None: if os.path.isfile(task): config = utils.load_yaml_config(task) task_names.append(config) - else: - task_missing.append(task) if task_missing != []: missing = ", ".join(task_missing) @@ -216,5 +215,5 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None: print(evaluator.make_table(results, "groups")) -if __name__ == "__main__": +if __name__ == "__main__": cli_evaluate() -- GitLab From 20a54b3af209ea191051cdb8a8abb3b6b1996048 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 5 Oct 2023 15:49:48 +0000 Subject: [PATCH 261/264] removed print messages, added cot extraction strings --- lm_eval/tasks/__init__.py | 2 -- .../bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml | 2 +- .../mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 64dd4fdb..c139d849 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -141,8 +141,6 @@ def include_task_folder(task_dir: str, register_task: bool = True) -> None: except Exception as error: import traceback - print("###") - print(yaml_path) eval_logger.warning( "Failed to load config in\n" f" {yaml_path}\n" diff --git a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml index b6574a4e..7ccf3699 100644 --- a/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml +++ b/lm_eval/tasks/bbh/flan_cot_zeroshot/_flan_cot_zeroshot_template_yaml @@ -18,5 +18,5 @@ filter_list: - name: "get-answer" filter: - function: "regex" - regex_pattern: "(?<=the answer is )(.*)(?=.)" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" - function: "take_first" diff --git a/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml index c9b03734..0666018b 100644 --- a/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml +++ b/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_generative_template_yaml @@ -9,7 +9,7 @@ filter_list: - name: "get-answer" filter: - function: "regex" - regex_pattern: "(?<=The answer is )(.*)(?=.)" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" - function: "take_first" generation_kwargs: until: -- GitLab From f2c396ab044d3d5f5087757051ecb0534eebea6a Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Thu, 5 Oct 2023 15:50:19 +0000 Subject: [PATCH 262/264] pre-commit reformat --- lm_eval/tasks/bigbench/README.md | 2 +- lm_eval/tasks/bigbench/generate_tasks.py | 351 ++++++++++++----------- 2 files changed, 180 insertions(+), 173 deletions(-) diff --git a/lm_eval/tasks/bigbench/README.md b/lm_eval/tasks/bigbench/README.md index bfb7d457..be680eac 100644 --- a/lm_eval/tasks/bigbench/README.md +++ b/lm_eval/tasks/bigbench/README.md @@ -6,7 +6,7 @@ Title: `Beyond the Imitation Game: Quantifying and extrapolating the capabilitie Abstract: https://arxiv.org/abs/2206.04615 -The Beyond the Imitation Game Benchmark (BIG-bench) is a collaborative benchmark intended to probe large language models and extrapolate their future capabilities. 
+The Beyond the Imitation Game Benchmark (BIG-bench) is a collaborative benchmark intended to probe large language models and extrapolate their future capabilities. Homepage: https://github.com/google/BIG-bench diff --git a/lm_eval/tasks/bigbench/generate_tasks.py b/lm_eval/tasks/bigbench/generate_tasks.py index dbd7a959..00a8799e 100644 --- a/lm_eval/tasks/bigbench/generate_tasks.py +++ b/lm_eval/tasks/bigbench/generate_tasks.py @@ -2,179 +2,182 @@ import os import yaml all_subtasks = [ - 'abstract_narrative_understanding', - 'anachronisms', - 'analogical_similarity', - 'analytic_entailment', - 'arithmetic', - 'ascii_word_recognition', - 'authorship_verification', - 'auto_categorization', - 'auto_debugging', - 'bbq_lite_json', - 'bridging_anaphora_resolution_barqa', - 'causal_judgment', - 'cause_and_effect', - 'checkmate_in_one', - 'chess_state_tracking', - 'chinese_remainder_theorem', - 'cifar10_classification', - 'code_line_description', - 'codenames', - 'color', - 'common_morpheme', - 'conceptual_combinations', - 'conlang_translation', - 'contextual_parametric_knowledge_conflicts', - 'crash_blossom', - 'crass_ai', - 'cryobiology_spanish', - 'cryptonite', - 'cs_algorithms', - 'dark_humor_detection', - 'date_understanding', - 'disambiguation_qa', - 'discourse_marker_prediction', - 'disfl_qa', - 'dyck_languages', - 'elementary_math_qa', - 'emoji_movie', - 'emojis_emotion_prediction', - 'empirical_judgments', - 'english_proverbs', - 'english_russian_proverbs', - 'entailed_polarity', - 'entailed_polarity_hindi', - 'epistemic_reasoning', - 'evaluating_information_essentiality', - 'fact_checker', - 'fantasy_reasoning', - 'few_shot_nlg', - 'figure_of_speech_detection', - 'formal_fallacies_syllogisms_negation', - 'gem', - 'gender_inclusive_sentences_german', - 'general_knowledge', - 'geometric_shapes', - 'goal_step_wikihow', - 'gre_reading_comprehension', - 'hhh_alignment', - 'hindi_question_answering', - 'hindu_knowledge', - 'hinglish_toxicity', - 'human_organs_senses', - 'hyperbaton', - 'identify_math_theorems', - 'identify_odd_metaphor', - 'implicatures', - 'implicit_relations', - 'intent_recognition', - 'international_phonetic_alphabet_nli', - 'international_phonetic_alphabet_transliterate', - 'intersect_geometry', - 'irony_identification', - 'kanji_ascii', - 'kannada', - 'key_value_maps', - 'known_unknowns', - 'language_games', - 'language_identification', - 'linguistic_mappings', - 'linguistics_puzzles', - 'list_functions', - 'logic_grid_puzzle', - 'logical_args', - 'logical_deduction', - 'logical_fallacy_detection', - 'logical_sequence', - 'mathematical_induction', - 'matrixshapes', - 'metaphor_boolean', - 'metaphor_understanding', - 'minute_mysteries_qa', - 'misconceptions', - 'misconceptions_russian', - 'mnist_ascii', - 'modified_arithmetic', - 'moral_permissibility', - 'movie_dialog_same_or_different', - 'movie_recommendation', - 'mult_data_wrangling', - 'multiemo', - 'natural_instructions', - 'navigate', - 'nonsense_words_grammar', - 'novel_concepts', - 'object_counting', - 'odd_one_out', - 'operators', - 'paragraph_segmentation', - 'parsinlu_qa', - 'parsinlu_reading_comprehension', - 'penguins_in_a_table', - 'periodic_elements', - 'persian_idioms', - 'phrase_relatedness', - 'physical_intuition', - 'physics', - 'physics_questions', - 'play_dialog_same_or_different', - 'polish_sequence_labeling', - 'presuppositions_as_nli', - 'qa_wikidata', - 'question_selection', - 'real_or_fake_text', - 'reasoning_about_colored_objects', - 'repeat_copy_logic', - 'rephrase', - 'riddle_sense', - 
'ruin_names', - 'salient_translation_error_detection', - 'scientific_press_release', - 'semantic_parsing_in_context_sparc', - 'semantic_parsing_spider', - 'sentence_ambiguity', - 'similarities_abstraction', - 'simp_turing_concept', - 'simple_arithmetic_json', - 'simple_arithmetic_json_multiple_choice', - 'simple_arithmetic_json_subtasks', - 'simple_arithmetic_multiple_targets_json', - 'simple_ethical_questions', - 'simple_text_editing', - 'snarks', - 'social_iqa', - 'social_support', - 'sports_understanding', - 'strange_stories', - 'strategyqa', - 'sufficient_information', - 'suicide_risk', - 'swahili_english_proverbs', - 'swedish_to_german_proverbs', - 'symbol_interpretation', - 'temporal_sequences', - 'tense', - 'timedial', - 'topical_chat', - 'tracking_shuffled_objects', - 'understanding_fables', - 'undo_permutation', - 'unit_conversion', - 'unit_interpretation', - 'unnatural_in_context_learning', - 'vitaminc_fact_verification', - 'what_is_the_tao', - 'which_wiki_edit', - 'winowhy', - 'word_sorting', - 'word_unscrambling' - ] + "abstract_narrative_understanding", + "anachronisms", + "analogical_similarity", + "analytic_entailment", + "arithmetic", + "ascii_word_recognition", + "authorship_verification", + "auto_categorization", + "auto_debugging", + "bbq_lite_json", + "bridging_anaphora_resolution_barqa", + "causal_judgment", + "cause_and_effect", + "checkmate_in_one", + "chess_state_tracking", + "chinese_remainder_theorem", + "cifar10_classification", + "code_line_description", + "codenames", + "color", + "common_morpheme", + "conceptual_combinations", + "conlang_translation", + "contextual_parametric_knowledge_conflicts", + "crash_blossom", + "crass_ai", + "cryobiology_spanish", + "cryptonite", + "cs_algorithms", + "dark_humor_detection", + "date_understanding", + "disambiguation_qa", + "discourse_marker_prediction", + "disfl_qa", + "dyck_languages", + "elementary_math_qa", + "emoji_movie", + "emojis_emotion_prediction", + "empirical_judgments", + "english_proverbs", + "english_russian_proverbs", + "entailed_polarity", + "entailed_polarity_hindi", + "epistemic_reasoning", + "evaluating_information_essentiality", + "fact_checker", + "fantasy_reasoning", + "few_shot_nlg", + "figure_of_speech_detection", + "formal_fallacies_syllogisms_negation", + "gem", + "gender_inclusive_sentences_german", + "general_knowledge", + "geometric_shapes", + "goal_step_wikihow", + "gre_reading_comprehension", + "hhh_alignment", + "hindi_question_answering", + "hindu_knowledge", + "hinglish_toxicity", + "human_organs_senses", + "hyperbaton", + "identify_math_theorems", + "identify_odd_metaphor", + "implicatures", + "implicit_relations", + "intent_recognition", + "international_phonetic_alphabet_nli", + "international_phonetic_alphabet_transliterate", + "intersect_geometry", + "irony_identification", + "kanji_ascii", + "kannada", + "key_value_maps", + "known_unknowns", + "language_games", + "language_identification", + "linguistic_mappings", + "linguistics_puzzles", + "list_functions", + "logic_grid_puzzle", + "logical_args", + "logical_deduction", + "logical_fallacy_detection", + "logical_sequence", + "mathematical_induction", + "matrixshapes", + "metaphor_boolean", + "metaphor_understanding", + "minute_mysteries_qa", + "misconceptions", + "misconceptions_russian", + "mnist_ascii", + "modified_arithmetic", + "moral_permissibility", + "movie_dialog_same_or_different", + "movie_recommendation", + "mult_data_wrangling", + "multiemo", + "natural_instructions", + "navigate", + "nonsense_words_grammar", + 
"novel_concepts", + "object_counting", + "odd_one_out", + "operators", + "paragraph_segmentation", + "parsinlu_qa", + "parsinlu_reading_comprehension", + "penguins_in_a_table", + "periodic_elements", + "persian_idioms", + "phrase_relatedness", + "physical_intuition", + "physics", + "physics_questions", + "play_dialog_same_or_different", + "polish_sequence_labeling", + "presuppositions_as_nli", + "qa_wikidata", + "question_selection", + "real_or_fake_text", + "reasoning_about_colored_objects", + "repeat_copy_logic", + "rephrase", + "riddle_sense", + "ruin_names", + "salient_translation_error_detection", + "scientific_press_release", + "semantic_parsing_in_context_sparc", + "semantic_parsing_spider", + "sentence_ambiguity", + "similarities_abstraction", + "simp_turing_concept", + "simple_arithmetic_json", + "simple_arithmetic_json_multiple_choice", + "simple_arithmetic_json_subtasks", + "simple_arithmetic_multiple_targets_json", + "simple_ethical_questions", + "simple_text_editing", + "snarks", + "social_iqa", + "social_support", + "sports_understanding", + "strange_stories", + "strategyqa", + "sufficient_information", + "suicide_risk", + "swahili_english_proverbs", + "swedish_to_german_proverbs", + "symbol_interpretation", + "temporal_sequences", + "tense", + "timedial", + "topical_chat", + "tracking_shuffled_objects", + "understanding_fables", + "undo_permutation", + "unit_conversion", + "unit_interpretation", + "unnatural_in_context_learning", + "vitaminc_fact_verification", + "what_is_the_tao", + "which_wiki_edit", + "winowhy", + "word_sorting", + "word_unscrambling", +] def main() -> None: - for path, task_type in zip(["multiple_choice", "greedy_until"], ["multiple_choice_template_yaml", "greedy_until_template_yaml"]): + for path, task_type in zip( + ["multiple_choice", "greedy_until"], + ["multiple_choice_template_yaml", "greedy_until_template_yaml"], + ): os.makedirs(path, exist_ok=True) for task in all_subtasks: file_name = f"{task}.yaml" @@ -184,11 +187,15 @@ def main() -> None: yaml.dump( { "include": f"../{task_type}", - "task": "bigbench_" + task + "_{}".format(task_type.split("_template_yaml")[0]), - "dataset_name": task + "_zero_shot", # zero-shot version of the dataset + "task": "bigbench_" + + task + + "_{}".format(task_type.split("_template_yaml")[0]), + "dataset_name": task + + "_zero_shot", # zero-shot version of the dataset }, f, - width=float("inf"), allow_unicode=True + width=float("inf"), + allow_unicode=True, ) except FileExistsError: pass -- GitLab From 2493c6ccc7605933275ed7cb34221b8d8086eb33 Mon Sep 17 00:00:00 2001 From: lintangsutawika Date: Fri, 6 Oct 2023 08:01:25 +0000 Subject: [PATCH 263/264] changed default to 5-shot --- lm_eval/tasks/benchmarks/flan/flan_held_out.yaml | 9 +++++---- lm_eval/tasks/mmlu/default/_default_template_yaml | 1 + .../mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml | 1 + .../flan_n_shot/_mmlu_flan_loglikelihood_template_yaml | 1 + 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml b/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml index 150e9477..e1429cda 100644 --- a/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml +++ b/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml @@ -3,10 +3,11 @@ task: # BBH - bbh_flan_zeroshot - bbh_flan_fewshot - # - bbh_flan_cot_fewshot - # - bbh_flan_cot_zeroshot + - bbh_flan_cot_fewshot + - bbh_flan_cot_zeroshot # MMLU + - mmlu - mmlu_flan_n_shot_generative - mmlu_flan_n_shot_loglikelihood - # - mmlu_flan_cot_zeroshot - # - 
mmlu_flan_cot_fewshot
+  - mmlu_flan_cot_zeroshot
+  - mmlu_flan_cot_fewshot
diff --git a/lm_eval/tasks/mmlu/default/_default_template_yaml b/lm_eval/tasks/mmlu/default/_default_template_yaml
index bd989c40..93aea1f6 100644
--- a/lm_eval/tasks/mmlu/default/_default_template_yaml
+++ b/lm_eval/tasks/mmlu/default/_default_template_yaml
@@ -15,3 +15,4 @@ metric_list:
   - metric: acc_norm
     aggregation: mean
     higher_is_better: true
+num_fewshot: 5
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
index b1ff96a8..8e39ff49 100644
--- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
+++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
@@ -12,3 +12,4 @@ metric_list:
   - metric: exact_match
     aggregation: mean
     higher_is_better: true
+num_fewshot: 5
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
index 5db2981a..15196e7e 100644
--- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
+++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
@@ -13,3 +13,4 @@ metric_list:
   - metric: acc_norm
     aggregation: mean
     higher_is_better: true
+num_fewshot: 5
--
GitLab


From 7d5e511c2d56e04bf7c94e66ac56c9e160c5493d Mon Sep 17 00:00:00 2001
From: lintangsutawika
Date: Fri, 6 Oct 2023 14:16:51 +0000
Subject: [PATCH 264/264] adjustments

---
 lm_eval/prompts/__init__.py                                 | 5 +++++
 lm_eval/tasks/mmlu/default/_default_template_yaml           | 1 -
 .../mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml    | 1 -
 .../mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml | 1 -
 4 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/lm_eval/prompts/__init__.py b/lm_eval/prompts/__init__.py
index 68eeac6c..746dc3da 100644
--- a/lm_eval/prompts/__init__.py
+++ b/lm_eval/prompts/__init__.py
@@ -116,6 +116,11 @@ class PromptString:
 
         doc_to_text = self.prompt_string["doc_to_text"]
         doc_to_target = self.prompt_string["doc_to_target"]
+
+        # TODO: we still need a way to process doc_to_choice
+        if "doc_to_choice" in self.prompt_string:
+            raise NotImplementedError("Not yet implemented to accept doc_to_choice")
+
         text_string = utils.apply_template(doc_to_text, doc)
         target_string = utils.apply_template(doc_to_target, doc)
 
diff --git a/lm_eval/tasks/mmlu/default/_default_template_yaml b/lm_eval/tasks/mmlu/default/_default_template_yaml
index 93aea1f6..bd989c40 100644
--- a/lm_eval/tasks/mmlu/default/_default_template_yaml
+++ b/lm_eval/tasks/mmlu/default/_default_template_yaml
@@ -15,4 +15,3 @@ metric_list:
   - metric: acc_norm
     aggregation: mean
     higher_is_better: true
-num_fewshot: 5
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
index 8e39ff49..b1ff96a8 100644
--- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
+++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_generative_template_yaml
@@ -12,4 +12,3 @@ metric_list:
   - metric: exact_match
     aggregation: mean
     higher_is_better: true
-num_fewshot: 5
diff --git a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
index 15196e7e..5db2981a 100644
--- a/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
+++ b/lm_eval/tasks/mmlu/flan_n_shot/_mmlu_flan_loglikelihood_template_yaml
@@ -13,4 +13,3 @@ metric_list:
   - metric: acc_norm
     aggregation:
mean higher_is_better: true -num_fewshot: 5 -- GitLab
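
Two of the changes above are easy to verify in isolation. First, the entrypoint move in PATCH 252 and PATCH 253 relies on standard interpreter behavior: when a package contains a `__main__.py`, `python -m <package>` executes that file with `__name__` set to `"__main__"`. A minimal sketch with a hypothetical package name (`toy_pkg` is not part of lm-eval):

```python
# toy_pkg/__main__.py -- a hypothetical package used only to illustrate the
# mechanism; an empty toy_pkg/__init__.py must exist alongside this file.
# `python -m toy_pkg` runs this module with __name__ == "__main__", which is
# why moving main.py into lm_eval/ as __main__.py enables `python -m lm_eval`.


def main() -> None:
    print("invoked via `python -m toy_pkg`")


if __name__ == "__main__":
    main()
```

Second, the chain-of-thought answer-extraction regex added in PATCH 261 can be sanity-checked outside the harness. The sketch below uses plain `re.search` as a stand-in for the harness's `regex` filter, with an invented model response:

```python
# Sketch: exercising the lookbehind alternation from the BBH/MMLU CoT configs.
import re

pattern = (
    r"((?<=The answer is )(.*)(?=.)"
    r"|(?<=the answer is )(.*)(?=.)"
    r"|(?<=The answer: )(.*)(?=.)"
    r"|(?<=The final answer: )(.*)(?=.))"
)

response = "Let's reason step by step. Only option (B) fits. The answer is (B)."

match = re.search(pattern, response)
# (?=.) only asserts that one more character follows, so the greedy (.*)
# backs off by exactly one character and the trailing "." is left out.
print(match.group(0) if match else "no match")  # -> (B)
```

Note that the unescaped `.` inside `(?=.)` matches any character, not just a period, so the filter drops the final character of the line rather than specifically a full stop.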