Commit ab96fc7e authored by lintangsutawika's avatar lintangsutawika
Browse files

merged with latest update

parents bf2517cc 8680e938
...@@ -3,3 +3,13 @@ ...@@ -3,3 +3,13 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_object_counting" "task": "bbh_zeroshot_object_counting"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.NumberParseRegexFilter
group_select: 0
regex_pattern: "([-0-9]+)"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_penguins_in_a_table" "task": "bbh_zeroshot_penguins_in_a_table"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_reasoning_about_colored_objects" "task": "bbh_zeroshot_reasoning_about_colored_objects"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_ruin_names" "task": "bbh_zeroshot_ruin_names"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_salient_translation_error_detection" "task": "bbh_zeroshot_salient_translation_error_detection"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_snarks" "task": "bbh_zeroshot_snarks"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,17 @@ ...@@ -3,3 +3,17 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_sports_understanding" "task": "bbh_zeroshot_sports_understanding"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MapRegexFilter
group_select: 0
ignore_case: true
regex_pattern_to_value:
\b(no|not plausible)\b: "no"
\b(yes|plausible)\b: "yes"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_temporal_sequences" "task": "bbh_zeroshot_temporal_sequences"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_tracking_shuffled_objects_five_objects" "task": "bbh_zeroshot_tracking_shuffled_objects_five_objects"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_tracking_shuffled_objects_seven_objects" "task": "bbh_zeroshot_tracking_shuffled_objects_seven_objects"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
...@@ -3,3 +3,15 @@ ...@@ -3,3 +3,15 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_tracking_shuffled_objects_three_objects" "task": "bbh_zeroshot_tracking_shuffled_objects_three_objects"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MultiChoiceRegexFilter
group_select: 0
ignore_case: true
ignore_punctuation: true
regex_pattern: "(\\([A-Z]\\))"
- function: "take_first"
import collections
import re
import sys
import unicodedata
from lm_eval.filters.extraction import RegexFilter, Filter
class ExtendedRegexFilter(RegexFilter):
    """RegexFilter extended with optional normalization of the response
    before matching: case folding, Unicode punctuation stripping, and
    removal of configurable regexes."""

    # Translation table mapping every Unicode punctuation codepoint to None;
    # used by str.translate() when ignore_punctuation is enabled.
    punct_tbl = dict.fromkeys(
        i for i in range(sys.maxunicode)
        if unicodedata.category(chr(i)).startswith('P')
    )

    def __init__(
        self, regex_pattern: str = r"#### (\-?[0-9\.\,]+)", group_select=0, fallback: str = "[invalid]",
        ignore_case=False, ignore_punctuation=False, regexes_to_ignore=None,
    ) -> None:
        """
        regex_pattern: pattern handed to the base RegexFilter.
        group_select: index into the findall() result list to keep.
        fallback: value returned by subclasses when nothing matches.
        ignore_case: lowercase text in filter_ignores().
        ignore_punctuation: strip Unicode punctuation in filter_ignores().
        regexes_to_ignore: patterns removed from text in filter_ignores().
        """
        super().__init__(regex_pattern, group_select, fallback)
        self.ignore_case = ignore_case
        self.ignore_punctuation = ignore_punctuation
        self.regexes_to_ignore = regexes_to_ignore

    def filter_ignores(self, st):
        """Normalize a string according to the configured ignore options."""
        if self.regexes_to_ignore is not None:
            for s in self.regexes_to_ignore:
                st = re.sub(s, "", st)

        if self.ignore_case:
            st = st.lower()

        if self.ignore_punctuation:
            # https://stackoverflow.com/a/266162
            st = st.translate(self.punct_tbl)
        return st

    def find_match(self, regex, resp, convert_dict=None):
        """Return the group_select-th match of `regex` in `resp`, mapped
        through `convert_dict` when present; falsy when there is no match.

        Note: the default was previously a mutable `{}`. It was never
        mutated, so defaulting to None is behavior-preserving and avoids
        the shared-mutable-default pitfall.
        """
        if convert_dict is None:
            convert_dict = {}
        match = regex.findall(resp)
        if match:
            match = match[self.group_select]
            if isinstance(match, tuple):
                # keep the first non-empty captured group
                match = [m for m in match if m][0]
            match = match.strip()
            if match and match in convert_dict:
                match = convert_dict[match]
        return match
class MapRegexFilter(ExtendedRegexFilter):
    # Matches any of a set of regex patterns in a model response and maps the
    # matched pattern to a fixed output value (e.g. "plausible" -> "yes").
    def __init__(
        self, regex_pattern_to_value: dict = {}, group_select=0, fallback: str = "[invalid]",
        ignore_case=False, ignore_punctuation=False, regexes_to_ignore=None,
    ) -> None:
        """
        regex_pattern_to_value: Match the regex pattern and change the result into the value
        group_select: Selects the (group_select)th match from the findall result. We use the whole regex_patterns, concatenated by |
        ignore_case: Lowers the case of response before matching with the given regex
        ignore_punctuation: Remove the punctuation before matching with the given regex
        regexes_to_ignore: Remove these regexes before matching with the given regex
        """
        # The parent regex is the '|'-union of all patterns; it is used first
        # to pick the group_select-th overall match from the response.
        super().__init__('|'.join(list(regex_pattern_to_value.keys())), group_select, fallback, ignore_case, ignore_punctuation, regexes_to_ignore)
        self.regex_to_value = {re.compile(r): v for r, v in regex_pattern_to_value.items()}

    def apply(self, resps, docs):
        # resps: list (one element per doc) of lists of model responses.
        filtered_resps = []

        for r in resps:
            filtered = []
            for resp in r:
                # Step 1: find the overall match using the union regex.
                whole_match_considering_group_select = self.find_match(self.regex, self.filter_ignores(resp))
                if whole_match_considering_group_select:
                    # Step 2: re-match each individual pattern against that
                    # substring to decide which mapped value applies.
                    for regex, mapped_value in self.regex_to_value.items():
                        match = self.find_match(regex, self.filter_ignores(whole_match_considering_group_select))
                        if match:
                            match = mapped_value
                            break
                # NOTE(review): when the whole-match is falsy, `match` is only
                # protected from being unbound (or stale from a previous
                # iteration) by the short-circuit `or` below — fragile flow.
                if not whole_match_considering_group_select or not match:
                    match = self.fallback
                filtered.append(match)

            filtered_resps.append(filtered)

        return filtered_resps
class NumberParseRegexFilter(ExtendedRegexFilter):
    # Extracts a numeric answer: first via the configured digit regex, then by
    # parsing an English number phrase ("twenty one") into digits.
    def apply(self, resps, docs):
        # here, we assume we have a list, in which each element is
        # a list of model responses for some particular input/target pair.
        # so we process each of these (same input/target response sets)
        # independently (and keep them a list.)
        filtered_resps = []
        # Third-party deps imported lazily so tasks that never use this filter
        # do not require them. `regex` (not `re`) is needed for the (?|...)
        # branch-reset groups in the pattern below.
        import regex
        from word2number import w2n
        # https://www.reddit.com/r/regex/comments/11a38uk/parsing_numbers_written_out_as_english_words
        english_number_regex = regex.compile(
            "((?:(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?|teen|ty)|eight(?:|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion)(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?:|teen|ty)|eight(?|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion|[^\S\r\n]|,|and|&)+)?(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?|teen|ty)|eight(?|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion))")
        for r in resps:
            filtered = []
            for resp in r:
                # Prefer the digit-based regex configured on the filter.
                match = self.find_match(self.regex, resp)
                if not match:
                    # Fall back to an English number phrase, converted to
                    # digits with word2number.
                    match = self.find_match(english_number_regex, resp.lower())
                    if match:
                        match = str(w2n.word_to_num(match))
                if not match:
                    match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)
        return filtered_resps
class WordSortFilter(Filter):
    """Extracts the model's proposed ordering for the BBH word_sorting task.

    Finds occurrences of the doc's words in each response and returns them
    joined by spaces, de-duplicated keeping each word's last occurrence.
    """

    def apply(self, resps, docs):
        filtered_resps = []

        for r, doc in zip(resps, docs):
            # The words to sort appear in the prompt after "List:".
            words = doc['input'].split("List:")[1].strip().split()
            # re.escape guards against words containing regex metacharacters,
            # which would otherwise corrupt the alternation pattern.
            regex = re.compile('|'.join([f"\\b{re.escape(w)}\\b" for w in words]))
            filtered = []
            for resp in r:
                match = regex.findall(resp)
                match.reverse()
                # Dedupe keeping each word's LAST occurrence (dict insertion
                # keeps the first position in the reversed list), then restore
                # left-to-right order with reversed().
                ordered_words = reversed(collections.OrderedDict(zip(match, [None] * len(match))))
                filtered.append(' '.join(ordered_words))
            filtered_resps.append(filtered)

        return filtered_resps
class MultiChoiceRegexFilter(ExtendedRegexFilter):
    def __init__(self, *args, **kwargs):
        r"""
        regex_pattern: The basic regex pattern to use. If it fails to match, we
            use a customized match procedure:
            - step 1: parse the choices between (A)..(Z) markers in the doc,
              then try to find those choice strings verbatim in the response.
            - step 2: parse the choice with regex :[\s]*([A-?]), where ?
              varies by the number of choices.
        group_select: Selects the (group_select)th match from the findall result.
        ignore_case: Ignores the case during step 1 matching.
        ignore_punctuation: Removes the punctuation during step 1 matching.
        regexes_to_ignore: Removes these regexes during step 1 matching.
        """
        super().__init__(*args, **kwargs)

    def apply(self, resps, docs):
        # here, we assume we have a list, in which each element is
        # a list of model responses for some particular input/target pair.
        # so we process each of these (same input/target response sets)
        # independently (and keep them a list.)
        filtered_resps = []

        for r, doc in zip(resps, docs):
            fallback_regexes = []           # escaped choice texts (verbatim match)
            choice_to_alpha = {}            # normalized choice text -> "(A)" label
            next_alpha = 'A'

            without_paren_fallback_regexes = []  # bare letters for step 2
            without_paren_to_target = {}         # bare letter -> "(A)" label

            # Choices in the prompt look like "(A) some text"; capture the text
            # after each letter marker up to the next newline or '('.
            multiple_choices_regex = re.compile(r"\([A-Z]\)([^\n^(]*)")
            match = multiple_choices_regex.findall(doc['input'])
            for m in match:
                m = self.filter_ignores(m.strip())
                fallback_regexes.append(f"{re.escape(m)}")
                choice_to_alpha[m] = f"({next_alpha})"

                without_paren_fallback_regexes.append(next_alpha)
                without_paren_to_target[next_alpha] = f"({next_alpha})"

                next_alpha = chr(ord(next_alpha) + 1)
            fallback_regex = re.compile('|'.join(fallback_regexes))
            without_paren_fallback_regex = '|'.join(without_paren_fallback_regexes)
            # Raw f-string: "\s" in a plain string literal is an invalid escape
            # sequence (SyntaxWarning on modern Python, future error).
            without_paren_fallback_regex = re.compile(rf":[\s]*({without_paren_fallback_regex})")

            filtered = []
            for resp in r:
                # Primary: the configured regex (e.g. "(\([A-Z]\))").
                match = self.find_match(self.regex, resp)
                if not match:
                    # Step 1 fallback: verbatim choice text, mapped to its label.
                    match = self.find_match(fallback_regex, self.filter_ignores(resp), choice_to_alpha)
                    if not match:
                        # Step 2 fallback: a bare letter after a colon.
                        match = self.find_match(without_paren_fallback_regex, resp, without_paren_to_target)
                        if not match:
                            match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)

        return filtered_resps
...@@ -3,3 +3,16 @@ ...@@ -3,3 +3,16 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_web_of_lies" "task": "bbh_zeroshot_web_of_lies"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.MapRegexFilter
group_select: 0
ignore_case: true
regex_pattern_to_value:
\b(no|does not tell the truth|is not telling the truth)\b: "no"
\b(yes|tells the truth|is telling the truth)\b: "yes"
- function: "take_first"
...@@ -3,3 +3,11 @@ ...@@ -3,3 +3,11 @@
"doc_to_text": "Q: {{input}}\nA:" "doc_to_text": "Q: {{input}}\nA:"
"include": "_zeroshot_template_yaml" "include": "_zeroshot_template_yaml"
"task": "bbh_zeroshot_word_sorting" "task": "bbh_zeroshot_word_sorting"
filter_list:
- name: "strict-match"
filter:
- function: "take_first"
- name: "flexible-extract"
filter:
- function: !function utils.WordSortFilter
- function: "take_first"
# Zero-shot chain-of-thought configuration for GSM8K.
group:
  - math_word_problems
task: gsm8k_cot_zeroshot
dataset_path: gsm8k
dataset_name: main
output_type: generate_until
training_split: train
fewshot_split: train
test_split: test
doc_to_text: "Q: {{question}}\nA: Let's think step by step."
doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    # Stripped from both prediction and target before comparison.
    regexes_to_ignore:
      - ","
      - "\\$"
      - "(?s).*#### "
      - "\\.$"
generation_kwargs:
  until:
    - "Q:"
    - "</s>"
    - "<|im_end|>"
  do_sample: false
repeats: 1
num_fewshot: 0
filter_list:
  # strict-match requires the canonical "The answer is N." phrasing.
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
      - function: "take_first"
  # flexible-extract takes the last number-like token in the response.
  - name: "flexible-extract"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
      - function: "take_first"
metadata:
  version: 3.0
...@@ -25,20 +25,27 @@ metric_list: ...@@ -25,20 +25,27 @@ metric_list:
- "," - ","
- "\\$" - "\\$"
- "(?s).*#### " - "(?s).*#### "
- "\n\n" - "\\.$"
generation_kwargs: generation_kwargs:
until: until:
- "Q:" - "Q:"
- "\n\n" - "</s>"
- "<|im_end|>"
do_sample: false do_sample: false
repeats: 1 repeats: 1
num_fewshot: 0 num_fewshot: 0
filter_list: filter_list:
- name: "get-answer" - name: "strict-match"
filter: filter:
- function: "regex" - function: "regex"
regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
- function: "take_first" - function: "take_first"
- name: "flexible-extract"
filter:
- function: "regex"
group_select: -1
regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
- function: "take_first"
metadata: metadata:
version: 2.0 version: 3.0
num_fewshot: 8 num_fewshot: 8
...@@ -19,19 +19,27 @@ metric_list: ...@@ -19,19 +19,27 @@ metric_list:
- "," - ","
- "\\$" - "\\$"
- "(?s).*#### " - "(?s).*#### "
- "\\.$"
generation_kwargs: generation_kwargs:
until: until:
- "\n\n"
- "Question:" - "Question:"
- "</s>"
- "<|im_end|>"
do_sample: false do_sample: false
temperature: 0.0 temperature: 0.0
repeats: 1 repeats: 1
num_fewshot: 5 num_fewshot: 5
filter_list: filter_list:
- name: "get-answer" - name: "strict-match"
filter: filter:
- function: "regex" - function: "regex"
regex_pattern: "#### (\\-?[0-9\\.\\,]+)" regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
- function: "take_first" - function: "take_first"
- name: "flexible-extract"
filter:
- function: "regex"
group_select: -1
regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
- function: "take_first"
metadata: metadata:
version: 2.0 version: 3.0
# HAE-RAE BENCH
### Paper
Title: `HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models`
Abstract: `Large Language Models (LLMs) trained on massive corpora demonstrate impressive capabilities in a wide range of tasks. While there are ongoing efforts to adapt these models to languages beyond English, the attention given to their evaluation methodologies remains limited. Current multilingual benchmarks often rely on back translations or re-implementations of English tests, limiting their capacity to capture unique cultural and linguistic nuances. To bridge this gap for the Korean language, we introduce HAE-RAE Bench, a dataset curated to challenge models lacking Korean cultural and contextual depth. The dataset encompasses six downstream tasks across four domains: vocabulary, history, general knowledge, and reading comprehension. Contrary to traditional evaluation suites focused on token or sequence classification and specific mathematical or logical reasoning, HAE-RAE Bench emphasizes a model's aptitude for recalling Korean-specific knowledge and cultural contexts. Comparative analysis with prior Korean benchmarks indicates that the HAE-RAE Bench presents a greater challenge to non-native models, by disturbing abilities and knowledge learned from English being transferred.`
Homepage: https://huggingface.co/datasets/HAERAE-HUB/HAE_RAE_BENCH
### Citation

```bibtex
@misc{son2023haerae,
    title={HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models},
    author={Guijin Son and Hanwool Lee and Suwan Kim and Huiseo Kim and Jaecheol Lee and Je Won Yeom and Jihyu Jung and Jung Woo Kim and Songseong Kim},
    year={2023},
    eprint={2309.02706},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```
### Groups and Tasks
#### Groups
* `haerae`: Consists of five of the tasks described in the HAE-RAE Bench paper. The 'Reading Comprehension' task was excluded from this implementation due to copyright issues and will be included in a future update. For the other tasks, some of the data may be replaced or expanded with the release of HAE-RAE v1.1; please note this when using the benchmark.
#### Tasks
The following tasks evaluate subjects in the HAE-RAE dataset:
- `haerae_standard_nomenclature`
- `haerae_loan_word`
- `haerae_rare_word`
- `haerae_general_knowledge`
- `haerae_history`
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
# Shared defaults for all HAE-RAE Bench tasks; individual task YAMLs
# include this file and set dataset_name/task.
group: haerae
dataset_path: HAERAE-HUB/HAE_RAE_BENCH
test_split: test
fewshot_split: test
output_type: multiple_choice
doc_to_text: "{{query}}"
doc_to_choice: ["(A)", "(B)", "(C)", "(D)", "(E)"]
doc_to_target: "{{answer}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
"dataset_name": "general_knowledge"
"include": "_default_haerae_yaml"
"task": "haerae_general_knowledge"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment