Commit e58b8182 authored by lintangsutawika

resolved merge conflict

parents d213a533 0571eeb1
from sklearn.metrics import f1_score


def weighted_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="weighted")
    return fscore
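A minimal usage sketch of `weighted_f1_score`; the `(gold, prediction)` pairs below are invented for illustration, whereas in the harness they are assembled during metric aggregation:

```python
from lm_eval.utils import weighted_f1_score

# Each item is a (gold, prediction) pair; the labels here are illustrative only.
example_items = [(0, 0), (1, 2), (2, 2)]
print(weighted_f1_score(example_items))  # weighted F1 across the label classes
```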
from lm_eval.utils import weighted_f1_score
from sklearn.metrics import f1_score
from lm_eval.utils import weighted_f1_score
def doc_to_target(doc):
    replacements = {0: "True", 1: "Neither", 2: "False"}
    return replacements[doc["label"]]


def weighted_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="weighted")
    return fscore
from sklearn.metrics import f1_score
from lm_eval.utils import weighted_f1_score
def doc_to_text(doc):
@@ -17,11 +17,3 @@ def doc_to_text(doc):
def doc_to_target(doc):
    replacements = {0: "entailment", 1: "neutral", 2: "contradiction"}
    return replacements[doc["label"]]


def weighted_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="weighted")
    return fscore
from sklearn.metrics import f1_score
from lm_eval.utils import weighted_f1_score
def doc_to_text(doc):
@@ -17,11 +17,3 @@ def doc_to_text(doc):
def doc_to_target(doc):
    replacements = {0: "entailment", 1: "neutral", 2: "contradiction"}
    return replacements[doc["label"]]


def weighted_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="weighted")
    return fscore
@@ -4,7 +4,7 @@ fewshot_config:
output_type: multiple_choice
should_decontaminate: true
doc_to_decontamination_query: "{{question}}"
doc_to_text: "P: {{flores_passage}}\nQ: {{question.strip()}}\nA: {{mc_answer1}}\nB: {{mc_answer2}}\nC: {{mc_answer3}}\nD: {{mc_answer4}}\nAnswer"
doc_to_text: "P: {{flores_passage}}\nQ: {{question.strip()}}\nA: {{mc_answer1}}\nB: {{mc_answer2}}\nC: {{mc_answer3}}\nD: {{mc_answer4}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: "{{['1', '2', '3', '4'].index(correct_answer_num)}}"
metric_list:
......
@@ -2,7 +2,6 @@ import re
import string
import numpy as np
from scipy.optimize import linear_sum_assignment
_ARTICLES = re.compile(r"\b(a|an|the)\b", re.UNICODE)
@@ -117,6 +116,8 @@ def _align_bags(predicted, gold):
    Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
    between them and gets maximum metric values over all the answers.
    """
    from scipy.optimize import linear_sum_assignment

    scores = np.zeros([len(gold), len(predicted)])
    for gold_index, gold_item in enumerate(gold):
        for pred_index, pred_item in enumerate(predicted):
......
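For context, a minimal sketch of how `scipy.optimize.linear_sum_assignment` turns such a score matrix into the optimal one-to-one alignment described in the docstring above (the matrix values are invented for illustration):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy score matrix: rows are gold answers, columns are predicted answers.
scores = np.array([[1.0, 0.0], [0.25, 0.75]])

# linear_sum_assignment minimizes total cost, so negate the scores to maximize.
row_ind, col_ind = linear_sum_assignment(-scores)
print(list(zip(row_ind, col_ind)))     # pairs gold 0 with pred 0, gold 1 with pred 1
print(scores[row_ind, col_ind].sum())  # 1.75, the maximum total metric value
```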
# gsm_plus
### Paper
Title: `GSM-PLUS: A Comprehensive Benchmark for Evaluating the Robustness of LLMs as Mathematical Problem Solvers`
Abstract: `Large language models (LLMs) have achieved impressive performance across various mathematical reasoning benchmarks. However, there are increasing debates regarding whether these models truly understand and apply mathematical knowledge or merely rely on shortcuts for mathematical reasoning. One essential and frequently occurring evidence is that when the math questions are slightly changed, LLMs can behave incorrectly. This motivates us to evaluate the robustness of LLMs’ math reasoning capability by testing a wide range of question variations. We introduce the adversarial grade school math (GSM-PLUS) dataset, an extension of GSM8K augmented with various mathematical perturbations. Our experiments on 25 LLMs and 4 prompting techniques show that while LLMs exhibit different levels of math reasoning abilities, their performances are far from robust. In particular, even for problems that have been solved in GSM8K, LLMs can make mistakes when new statements are added or the question targets are altered. We also explore whether more robust performance can be achieved by composing existing prompting methods, in which we try an iterative method that generates and verifies each intermediate thought based on its reasoning goal and calculation result.`
Note: the original GSM-Plus dataset lacks a train-test split. To keep the format compatible with GSM8K, the preprocessed dataset is split with a test ratio of 0.1 (after shuffling).
Homepage (original): https://huggingface.co/datasets/qintongli/GSM-Plus
Homepage (preprocessed): https://huggingface.co/datasets/sjyuxyz/GSM-Plus-Formatted
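A minimal sketch of how such a split can be reproduced with the `datasets` library; the 10% test ratio comes from the note above, while the shuffle seed is an assumption (the preprocessed dataset's actual seed is not documented here):

```python
from datasets import load_dataset

# Load the unsplit GSM-Plus data and carve out a 10% test set.
gsm_plus = load_dataset("qintongli/GSM-Plus", split="test")
splits = gsm_plus.train_test_split(test_size=0.1, shuffle=True, seed=42)  # seed is illustrative
print(splits["train"].num_rows, splits["test"].num_rows)
```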
### Citation
```bibtex
@misc{li2024gsmpluscomprehensivebenchmarkevaluating,
  title={GSM-Plus: A Comprehensive Benchmark for Evaluating the Robustness of LLMs as Mathematical Problem Solvers},
  author={Qintong Li and Leyang Cui and Xueliang Zhao and Lingpeng Kong and Wei Bi},
  year={2024},
  eprint={2402.19255},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2402.19255},
}
```
### Groups and Tasks
#### Groups
* Not part of a group yet
#### Tasks
The following tasks evaluate subjects in the `gsm_plus` dataset (an example invocation follows the list):
- `gsm_plus`
- `gsm_plus_mini`
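A rough sketch of running one of these tasks through the harness's Python API; the model choice is illustrative and the exact argument names may differ across harness versions:

```python
import lm_eval

# Evaluate a small Hugging Face model on GSM-Plus using the 5-shot setup
# configured in the task YAML.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # illustrative model choice
    tasks=["gsm_plus"],
)
print(results["results"]["gsm_plus"])
```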
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
tag:
  - math_word_problems
task: gsm_plus
dataset_path: qintongli/GSM-Plus
output_type: generate_until
training_split: test
fewshot_split: test
test_split: test
doc_to_text: "Question: {{question}}\nAnswer:"
doc_to_target: "{{solution}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
      - "(?s).*#### "
      - "\\.$"
generation_kwargs:
  until:
    - "Question:"
    - "</s>"
    - "<|im_end|>"
  do_sample: false
  temperature: 0.0
repeats: 1
num_fewshot: 5
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
  - name: "flexible-extract"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
      - function: "take_first"
metadata:
  version: 1.0
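To illustrate the two answer filters above, a minimal sketch that applies the same regexes to a sample completion (the completion text is invented for demonstration):

```python
import re

completion = "She buys 3 * 4 = 12 apples and pays 12 * 2 = 24 dollars.\n#### 24"

# strict-match: only accept an answer introduced by the GSM-style "#### " marker.
strict = re.findall(r"#### (\-?[0-9\.\,]+)", completion)
print(strict[0] if strict else "[invalid]")  # prints: 24

# flexible-extract: take the last number-like span anywhere in the completion;
# group_select: -1 in the YAML corresponds to picking the final match here.
flexible = re.findall(r"(-?[$0-9.,]{2,})|(-?[0-9]+)", completion)
print(next(group for group in flexible[-1] if group))  # prints: 24
```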
tag:
  - math_word_problems
task: gsm_plus_mini
dataset_path: qintongli/GSM-Plus
output_type: generate_until
training_split: testmini
fewshot_split: testmini
test_split: testmini
doc_to_text: "Question: {{question}}\nAnswer:"
doc_to_target: "{{solution}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
      - "(?s).*#### "
      - "\\.$"
generation_kwargs:
  until:
    - "Question:"
    - "</s>"
    - "<|im_end|>"
  do_sample: false
  temperature: 0.0
repeats: 1
num_fewshot: 5
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
  - name: "flexible-extract"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
      - function: "take_first"
metadata:
  version: 1.0
group: haerae
task:
  - haerae_gk
  - haerae_hi
  - haerae_lw
  - haerae_rw
  - haerae_sn
  - haerae_general_knowledge
  - haerae_history
  - haerae_loan_word
  - haerae_rare_word
  - haerae_standard_nomenclature
aggregate_metric_list:
  - metric: acc
    aggregation: mean
......
"dataset_name": "general_knowledge"
"include": "_default_haerae_yaml"
"task": "haerae_general_knowledge"
dataset_name: general_knowledge
include: _default_haerae_yaml
task: haerae_general_knowledge
"dataset_name": "history"
"include": "_default_haerae_yaml"
"task": "haerae_history"
dataset_name: history
include: _default_haerae_yaml
task: haerae_history
"dataset_name": "loan_words"
"include": "_default_haerae_yaml"
"task": "haerae_loan_word"
dataset_name: loan_words
include: _default_haerae_yaml
task: haerae_loan_word
"dataset_name": "rare_words"
"include": "_default_haerae_yaml"
"task": "haerae_rare_word"
dataset_name: rare_words
include: _default_haerae_yaml
task: haerae_rare_word
"dataset_name": "standard_nomenclature"
"include": "_default_haerae_yaml"
"task": "haerae_standard_nomenclature"
dataset_name: standard_nomenclature
include: _default_haerae_yaml
task: haerae_standard_nomenclature
from datasets import Dataset
from sklearn.metrics import f1_score
def copa_doc_to_text(doc: dict) -> str:
@@ -41,6 +40,8 @@ def hellaswag_process_doc(doc: Dataset) -> Dataset:
def macro_f1_score(items):
    from sklearn.metrics import f1_score

    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
......
# MedConceptsQA
### Paper
Title: `MedConceptsQA: Open Source Medical Concepts QA Benchmark`
Abstract: https://arxiv.org/abs/2405.07348
MedConceptsQA is a dedicated open source benchmark for medical concepts question answering. The benchmark comprises questions about various medical concepts across different vocabularies: diagnoses, procedures, and drugs.
The questions are categorized into three levels of difficulty: easy, medium, and hard.
Our benchmark serves as a valuable resource for evaluating the abilities of Large Language Models to interpret medical codes and distinguish between medical concepts.
### Citation
```
@article{shoham2024medconceptsqa,
  title={MedConceptsQA--Open Source Medical Concepts QA Benchmark},
  author={Shoham, Ofir Ben and Rappoport, Nadav},
  journal={arXiv preprint arXiv:2405.07348},
  year={2024}
}
```
### Groups and Tasks
#### Groups
* `med_concepts_qa`: Contains all the QA tasks (diagnoses, procedures, and drugs).
#### Tasks
* `med_concepts_qa_icd9cm` - ICD9-CM (diagnosis codes, ICD9 format) question-answering. This involves providing information, clarifications, and answering questions related to ICD-9-CM (International Classification of Diseases, 9th Revision, Clinical Modification) diagnosis codes.
* `med_concepts_qa_icd10cm` - ICD10-CM (diagnosis codes, ICD10 format) question-answering. This involves providing information, clarifications, and answering questions related to ICD-10-CM (International Classification of Diseases, 10th Revision, Clinical Modification) diagnosis codes.
* `med_concepts_qa_icd9proc` - ICD9-Proc (procedure codes, ICD9 format) question-answering. This involves providing information, clarifications, and answering questions related to ICD-9-PCS (International Classification of Diseases, 9th Revision, Procedure Coding System) procedure codes.
* `med_concepts_qa_icd10proc` - ICD10-Proc (procedure codes, ICD10 format) question-answering. This involves providing information, clarifications, and answering questions related to ICD-10-PCS (International Classification of Diseases, 10th Revision, Procedure Coding System) procedure codes.
* `med_concepts_qa_atc` - ATC (Anatomical Therapeutic Chemical Classification System) question-answering. This involves providing information, clarifications, and answering questions related to the ATC classification system, which is used for the classification of drugs and other medical products according to the organ or system on which they act and their therapeutic, pharmacological, and chemical properties.
dataset_path: ofir408/MedConceptsQA
output_type: multiple_choice
description: "Answer A,B,C,D according to the answer to this multiple choice question.\n"
fewshot_split: dev
fewshot_config:
  sampler: first_n
num_fewshot: 4
test_split: test
doc_to_text: "{{question}}\nAnswer:"
doc_to_target: answer_id
doc_to_choice: ['A', 'B', 'C', 'D']
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
from typing import List

import yaml


def generate_yaml_content(vocab_name: str, level: str):
    content = {
        "dataset_name": f"{vocab_name}_{level}",
        "tag": f"med_concepts_qa_{vocab_name}_tasks",
        "include": "_default_template_yaml",
        "task": f"med_concepts_qa_{vocab_name}_{level}",
        "task_alias": f"{vocab_name}_{level}",
    }
    return content


def generate_yaml_files(
    vocab_names: List[str], levels: List[str], file_name_prefix: str
):
    for vocab_name in vocab_names:
        for level in levels:
            yaml_content = generate_yaml_content(vocab_name, level)
            filename = f"{file_name_prefix}_{vocab_name}_{level}.yaml"
            with open(filename, "w") as yaml_file:
                yaml.dump(yaml_content, yaml_file, default_flow_style=False)
            print(f"Generated {filename}")


if __name__ == "__main__":
    generate_yaml_files(
        vocab_names=["icd9cm", "icd10cm", "icd9proc", "icd10proc", "atc"],
        levels=["easy", "medium", "hard"],
        file_name_prefix="med_concepts_qa",
    )
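For reference, a small sketch of what the script writes for a single (vocabulary, level) pair; the expected file body appears in the trailing comments (keys come out alphabetically because `yaml.dump` sorts keys by default):

```python
import yaml

# Mirrors generate_yaml_content("icd9cm", "easy") from the script above.
content = {
    "dataset_name": "icd9cm_easy",
    "tag": "med_concepts_qa_icd9cm_tasks",
    "include": "_default_template_yaml",
    "task": "med_concepts_qa_icd9cm_easy",
    "task_alias": "icd9cm_easy",
}
print(yaml.dump(content, default_flow_style=False))
# dataset_name: icd9cm_easy
# include: _default_template_yaml
# tag: med_concepts_qa_icd9cm_tasks
# task: med_concepts_qa_icd9cm_easy
# task_alias: icd9cm_easy
```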
group: med_concepts_qa
task:
  - med_concepts_qa_icd9cm
  - med_concepts_qa_icd10cm
  - med_concepts_qa_icd9proc
  - med_concepts_qa_icd10proc
  - med_concepts_qa_atc
aggregate_metric_list:
  - metric: acc
    aggregation: mean