Commit 15e3f5c5 authored by haileyschoelkopf

add results

parent 69a82648
{
"results": {
"gsm8k_cot_llama": {
"alias": "gsm8k_cot_llama",
"exact_match,strict-match": 0.7725549658832449,
"exact_match_stderr,strict-match": 0.01154636331254809,
"exact_match,flexible-extract": 0.7778620166793025,
"exact_match_stderr,flexible-extract": 0.011449986902435323
}
},
"group_subtasks": {
"gsm8k_cot_llama": []
},
"configs": {
"gsm8k_cot_llama": {
"task": "gsm8k_cot_llama",
"tag": [
"chain_of_thought"
],
"dataset_path": "gsm8k",
"dataset_name": "main",
"test_split": "test",
"doc_to_text": "Given the following problem, reason and give a final answer to the problem.\nProblem: {{question}}\nYour response should end with \"The final answer is [answer]\" where [answer] is the response to the problem.\n",
"doc_to_target": "{{answer.split('####')[-1].strip() if answer is defined else target}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n",
"samples": [
{
"question": "There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?",
"target": "There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The final answer is 6"
},
{
"question": "If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?",
"target": "There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The final answer is 5"
},
{
"question": "Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?",
"target": "Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The final answer is 39"
},
{
"question": "Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?",
"target": "Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The final answer is 8"
},
{
"question": "Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?",
"target": "Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The final answer is 9"
},
{
"question": "There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?",
"target": "There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The final answer is 29"
},
{
"question": "Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?",
"target": "Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The final answer is 33"
},
{
"question": "Olivia has $23. She bought five bagels for $3 each. How much money does she have left?",
"target": "Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The final answer is 8"
}
]
},
"num_fewshot": 5,
"metric_list": [
{
"aggregation": "mean",
"higher_is_better": true,
"ignore_case": true,
"ignore_punctuation": false,
"metric": "exact_match",
"regexes_to_ignore": [
",",
"\\$",
"(?s).*#### ",
"\\.$"
]
}
],
"output_type": "generate_until",
"generation_kwargs": {
"do_sample": false,
"until": [
"<|eot_id|>",
"<|start_header_id|>user<|end_header_id|>",
"Q:",
"</s>",
"<|im_end|>"
]
},
"repeats": 1,
"filter_list": [
{
"filter": [
{
"function": "regex",
"group_select": -1,
"regex_pattern": "The final answer is ((-?[$0-9.,]{2,})|(-?[0-9]+))"
},
{
"function": "take_first"
}
],
"name": "strict-match"
},
{
"filter": [
{
"function": "regex",
"group_select": -1,
"regex_pattern": "(-?[$0-9.,]{2,})|(-?[0-9]+)"
},
{
"function": "take_first"
}
],
"name": "flexible-extract"
}
],
"should_decontaminate": false,
"metadata": {
"version": 3.0
}
}
},
"versions": {
"gsm8k_cot_llama": 3.0
},
"n-shot": {
"gsm8k_cot_llama": 5
},
"higher_is_better": {
"gsm8k_cot_llama": {
"exact_match": true
}
},
"n-samples": {
"gsm8k_cot_llama": {
"original": 1319,
"effective": 1319
}
},
"config": {
"model": "vllm",
"model_args": "pretrained=meta-llama/Meta-Llama-3-8B-Instruct",
"batch_size": "auto",
"batch_sizes": [],
"device": null,
"use_cache": null,
"limit": null,
"bootstrap_iters": 100000,
"gen_kwargs": null,
"random_seed": 0,
"numpy_seed": 1234,
"torch_seed": 1234,
"fewshot_seed": 1234
},
"git_hash": "7489a342",
"date": 1724622900.312985,
"pretty_env_info": "'NoneType' object has no attribute 'splitlines'",
"transformers_version": "4.44.1",
"upper_git_hash": null,
"tokenizer_pad_token": [
"<|eot_id|>",
"128009"
],
"tokenizer_eos_token": [
"<|eot_id|>",
"128009"
],
"tokenizer_bos_token": [
"<|begin_of_text|>",
"128000"
],
"eot_token_id": 128009,
"max_length": 8192,
"task_hashes": {
"gsm8k_cot_llama": "a978e36e39aa2bdf4bfe41af21fd2ffcf052677fd439dde950d6904680741ba8"
},
"model_source": "vllm",
"model_name": "meta-llama/Meta-Llama-3-8B-Instruct",
"model_name_sanitized": "meta-llama__Meta-Llama-3-8B-Instruct",
"system_instruction": null,
"system_instruction_sha": null,
"fewshot_as_multiturn": true,
"chat_template": null,
"chat_template_sha": null,
"start_time": 6496343.936180836,
"end_time": 6496600.2954714,
"total_evaluation_time_seconds": "256.35929056443274"
}
\ No newline at end of file
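The strict-match and flexible-extract scores above come from the two regex filters in this task's filter_list. As a minimal, illustrative re-implementation of that extraction step (pattern strings copied verbatim from the config; the helper name and everything else are assumptions, not the harness's actual code):

```python
import re

# Patterns copied from filter_list above; with group_select: -1 the harness
# keeps the last match found in the model's generation.
STRICT = re.compile(r"The final answer is ((-?[$0-9.,]{2,})|(-?[0-9]+))")
FLEXIBLE = re.compile(r"(-?[$0-9.,]{2,})|(-?[0-9]+)")

def extract_last(pattern: re.Pattern, text: str):
    """Return the last match's first non-empty capture group, or None."""
    matches = pattern.findall(text)
    if not matches:
        return None
    last = matches[-1]  # findall yields tuples because of the nested groups
    return next((g for g in last if g), None)

completion = "5 bagels cost 5 x 3 = 15 dollars, so 23 - 15 = 8. The final answer is 8"
print(extract_last(STRICT, completion))    # '8' -> scored under strict-match
print(extract_last(FLEXIBLE, completion))  # '8' -> scored under flexible-extract
```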
{
"results": {
"gsm8k_judge_2": {
"alias": "gsm8k_judge_2",
"exact_match,test": 0.8165276724791509,
"exact_match_stderr,test": 0.01066137044869965
}
},
"group_subtasks": {
"gsm8k_judge_2": []
},
"configs": {
"gsm8k_judge_2": {
"output_path": "/home/mchorse/lm-evaluation-harness/gsm8k_resps/meta-llama__Meta-Llama-3-8B-Instruct/samples_gsm8k_cot_llama_2024-08-25T21-59-12.123082.jsonl",
"task": "gsm8k_judge_2",
"tag": [
"judge"
],
"dataset_path": "gsm8k",
"dataset_name": "main",
"training_split": "train",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Given the following question and reference answer, verify if the attempted answer is correct. If it is, return \"The answer is Correct\". If it is incorrect, return \"The answer is Incorrect\".\\nQuestion: {{question}}\\nReference Answer: {{answer}}\\nAnswer Attempt: {{resp}}",
"doc_to_target": "Correct",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 5,
"metric_list": [
{
"metric": "exact_match",
"aggregation": "mean",
"higher_is_better": true,
"ignore_case": true,
"ignore_punctuation": true,
"regexes_to_ignore": [
",",
"\\$",
"(?s).*#### ",
"\\.$"
]
}
],
"output_type": "generate_until",
"generation_kwargs": {
"until": [
"<|start_header_id|>user<|end_header_id|>",
"Question:",
"</s>",
"<|im_end|>"
],
"do_sample": false,
"temperature": 0.0
},
"repeats": 1,
"filter_list": [
{
"name": "test",
"filter": [
{
"function": "regex",
"regex_pattern": "(Correct|Incorrect)"
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 3.0
}
}
},
"versions": {
"gsm8k_judge_2": 3.0
},
"n-shot": {
"gsm8k_judge_2": 5
},
"higher_is_better": {
"gsm8k_judge_2": {
"exact_match": true
}
},
"n-samples": {
"gsm8k_judge_2": {
"original": 1319,
"effective": 1319
}
},
"config": {
"model": "vllm",
"model_args": "pretrained=meta-llama/Meta-Llama-3.1-70B-Instruct,max_length=4096,tensor_parallel_size=4,gpu_memory_utilization=0.85",
"batch_size": "auto",
"batch_sizes": [],
"device": null,
"use_cache": null,
"limit": null,
"bootstrap_iters": 100000,
"gen_kwargs": null,
"random_seed": 0,
"numpy_seed": 1234,
"torch_seed": 1234,
"fewshot_seed": 1234
},
"git_hash": "69a82648",
"date": 1724670994.7125044,
"pretty_env_info": "'NoneType' object has no attribute 'splitlines'",
"transformers_version": "4.44.2",
"upper_git_hash": null,
"tokenizer_pad_token": [
"<|eot_id|>",
"128009"
],
"tokenizer_eos_token": [
"<|eot_id|>",
"128009"
],
"tokenizer_bos_token": [
"<|begin_of_text|>",
"128000"
],
"eot_token_id": 128009,
"max_length": 4096,
"task_hashes": {
"gsm8k_judge_2": "2d5235e1cea72159554b8768090f30eb9c56d68b614c4b1851ed2441d5c87590"
},
"model_source": "vllm",
"model_name": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"model_name_sanitized": "meta-llama__Meta-Llama-3.1-70B-Instruct",
"system_instruction": null,
"system_instruction_sha": null,
"fewshot_as_multiturn": true,
"chat_template": null,
"chat_template_sha": null,
"start_time": 6544438.38826315,
"end_time": 6545862.296918048,
"total_evaluation_time_seconds": "1423.9086548974738"
}
\ No newline at end of file
@@ -34,7 +34,7 @@ class JudgeTask(ConfigurableTask):
         resps = []
         # load json
         if self.output_path is not None:
-            with open(self.output_path, "r") as f:
+            with open(self.output_path, "r", encoding='utf-8') as f:
                 for line in f:
                     resp = json.loads(line)
                     resps.append({"resp": resp["resps"][0][0], "doc": resp["doc_id"]})
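For context, the file read here appears to be the per-sample JSONL log that the harness writes when samples are logged (the `samples_gsm8k_cot_llama_*.jsonl` path referenced by `output_path`). A self-contained sketch of the same loading logic, with the field names `resps` and `doc_id` taken from the diff above and everything else assumed for illustration:

```python
import json

def load_judge_inputs(samples_path: str):
    """Collect one {resp, doc} entry per line of a per-sample JSONL log.

    "resps" holds a nested list of generations, so [0][0] picks the first
    generation of the first repeat for that document, as in the diff above.
    """
    entries = []
    with open(samples_path, "r", encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            entries.append({"resp": record["resps"][0][0], "doc": record["doc_id"]})
    return entries
```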
@@ -292,7 +292,7 @@ def simple_evaluate(
             model_source=model,
             model_args=model_args,
             system_instruction=system_instruction,
-            chat_template=lm.chat_template(apply_chat_template),
+            chat_template=None,
             fewshot_as_multiturn=fewshot_as_multiturn,
         )
@@ -4,18 +4,18 @@ include: gsm8k_judge_1.yaml
 task: gsm8k_judge_2
 output_type: generate_until
-output_path:
+output_path: '/home/mchorse/lm-evaluation-harness/gsm8k_resps/meta-llama__Meta-Llama-3-8B-Instruct/samples_gsm8k_cot_llama_2024-08-25T21-59-12.123082.jsonl'
 #doc_to_text: "Question: {{question}}\nAnswer:"
 doc_to_text: 'Given the following question and reference answer, verify if the attempted answer is correct. If it is, return "The answer is Correct". If it is incorrect, return "The answer is Incorrect".\nQuestion: {{question}}\nReference Answer: {{answer}}\nAnswer Attempt: {{resp}}'
-target_delimiter: "\n"
-doc_to_target: "The answer is Correct" #" {{answer.split('### ')[-1].rstrip()}}"
+# target_delimiter: "\n"
+doc_to_target: "Correct" #" {{answer.split('### ')[-1].rstrip()}}"
 metric_list:
   - metric: exact_match
     aggregation: mean
     higher_is_better: true
     ignore_case: true
-    ignore_punctuation: false
+    ignore_punctuation: true
     regexes_to_ignore:
       - ","
       - "\\$"
       - "(?s).*#### "
       - "\\.$"
@@ -35,9 +35,7 @@ filter_list:
   - name: "test"
     filter:
       - function: "regex"
-        regex_pattern: "The answer is (Correct|Incorrect)"
-        ignore_punctuation: true
-        ignore_case: true
+        regex_pattern: "(Correct|Incorrect)"
       - function: "take_first"
 metadata:
   version: 3.0
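Putting the judge pieces together: the prompt template asks the 70B model to reply "The answer is Correct" or "The answer is Incorrect", the "test" filter extracts just Correct/Incorrect, and exact_match compares that against the target "Correct" (case-insensitively, per ignore_case: true). In effect, the exact_match,test score above is the fraction of the 8B model's GSM8K attempts that the 70B judge labels Correct. A rough, illustrative sketch of that scoring path (helper name and details assumed, not the harness's actual implementation):

```python
import re

# Regex from the "test" filter above; a judged sample counts as correct
# only when the extracted verdict matches the target "Correct".
VERDICT = re.compile(r"(Correct|Incorrect)")

def score_judge_generation(generation: str, target: str = "Correct") -> float:
    """Apply the regex filter, then a case-insensitive exact_match."""
    match = VERDICT.search(generation)
    extracted = match.group(1) if match else ""
    return float(extracted.lower() == target.lower())

print(score_judge_generation("The answer is Correct."))   # 1.0
print(score_judge_generation("The answer is Incorrect"))  # 0.0
```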