"...lm-evaluation-harness.git" did not exist on "d79a4389e4e1eeb83bc9430514b3cc0f77477fd0"
Commit 2b56339e authored by Baber's avatar Baber
Browse files

Merge branch 'main' into longcxt

parents 0b533339 703fbffd
import evaluate as hf_evaluate

# Smoke test: load the `code_eval` metric and run it once on a trivial example so that
# any setup problem (e.g. HF_ALLOW_CODE_EVAL not set to "1") surfaces at import time.
try:
    compute_ = hf_evaluate.load("code_eval")
    test_cases = ["assert add(2, 3)==5"]
    candidates = [["def add(a,b): return a*b"]]
    results = compute_.compute(references=test_cases, predictions=candidates, k=[1])
except Exception as e:
    raise e


def pass_at_k(references: list[str], predictions: list[list[str]], k: list[int] = None):
    """Compute pass@k given references (unit tests) and candidate programs per doc."""
    global compute_
    assert k is not None
    if isinstance(k, int):
        k = [k]
    res = compute_.compute(
        references=references,
        predictions=predictions,
        k=k,
    )
    # `code_eval` returns (pass_at_k_scores, per-sample results); keep only the scores.
    return res[0]


def build_predictions(resps: list[list[str]], docs: list[dict]) -> list[list[str]]:
    """Prepend each doc's prompt to every sampled completion for that doc."""
    return [[doc["prompt"] + r for r in resp] for resp, doc in zip(resps, docs)]
# LLAMA Evals
### Paper
Title: LLAMA Evals
Abstract: Evals reproducing those provided by the LLAMA team in the Hugging Face repo.
Homepage: `https://huggingface.co/collections/meta-llama/llama-31-evals-66a2c5a14c2093e58298ac7f`
Note: These tasks are intended to be run with `apply_chat_template` and `fewshot_as_multiturn` enabled, as in the sketch below.
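For example, via the Python API (a sketch only; exact argument names may differ across harness versions, and the model below is just an illustration):

```python
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=meta-llama/Llama-3.1-8B-Instruct",  # example model only
    tasks=["mmlu_llama"],
    apply_chat_template=True,   # render prompts through the tokenizer's chat template
    fewshot_as_multiturn=True,  # supply the few-shot examples as prior chat turns
)
print(results["results"])
```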
### Citation
```
BibTeX-formatted citation goes here
```
### Groups, Tags, and Tasks
#### Groups
* `mmlu_llama`: Llama-3.1-evals-style generation variant of MMLU, aggregating the four category groups below
* `mmlu_llama_stem`, `mmlu_llama_other`, `mmlu_llama_social_sciences`, `mmlu_llama_humanities`: per-category aggregates over the individual MMLU subject tasks
#### Tags
* `mmlu_llama_stem_tasks`, `mmlu_llama_other_tasks`, `mmlu_llama_social_sciences_tasks`, `mmlu_llama_humanities_tasks`: applied to the individual subject tasks belonging to each category group
#### Tasks
* `mmlu_llama`: `generation variant of MMLU`
* `arc_challenge_chat`: `generation variant of ARC-Challenge using MMLU format`
### Checklist
For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
dataset_path: hails/mmlu_no_train # a copy of `cais/mmlu` with no auxiliary_train split
output_type: generate_until
test_split: test
fewshot_split: dev
fewshot_config:
  sampler: first_n
doc_to_text: "Given the following question and four candidate answers (A, B, C and D), choose the best answer.\nQuestion: {{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nYour response should end with \"The best answer is [the_answer_letter]\" where the [the_answer_letter] is one of A, B, C or D."
assistant_prefill: "The best answer is"
doc_to_target: "{{['A.','B.','C.','D.'][answer]}}"
num_fewshot: 5
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
    regexes_to_ignore:
      - "\\$"
      - "\\.$"
generation_kwargs:
  until:
    - "."
  max_gen_toks: 10
filter_list:
  - name: strict_match
    filter:
      - function: remove_whitespace
      - function: take_first
metadata:
  version: 1.0
dataset_kwargs:
  trust_remote_code: true
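To make the prompt format above concrete, the following standalone sketch (not part of the harness) renders `doc_to_text` and `doc_to_target` with Jinja2 for a made-up document in the `cais/mmlu` schema:

```python
from jinja2 import Template

# Made-up MMLU-style document: `question`, `choices`, and an integer `answer` index.
doc = {"question": "What is 2 + 2?", "choices": ["3", "4", "5", "22"], "answer": 1}

doc_to_text = (
    "Given the following question and four candidate answers (A, B, C and D), "
    "choose the best answer.\nQuestion: {{question.strip()}}\n"
    "A. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\n"
    'Your response should end with "The best answer is [the_answer_letter]" '
    "where the [the_answer_letter] is one of A, B, C or D."
)
doc_to_target = "{{['A.','B.','C.','D.'][answer]}}"

print(Template(doc_to_text).render(**doc))   # the user prompt shown to the model
print(Template(doc_to_target).render(**doc)) # -> "B."
```

The model then continues from the `assistant_prefill` ("The best answer is"), generation stops at the first ".", the `strict_match` filter strips whitespace, and the result is exact-matched against the target with trailing periods and case ignored.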
group: mmlu_llama_humanities
group_alias: humanities
task:
  - mmlu_llama_humanities_tasks
aggregate_metric_list:
  - metric: exact_match
    aggregation: mean
    weight_by_size: True
    filter_list: [strict_match]
metadata:
  version: 1
group: mmlu_llama_other
group_alias: other
task:
  - mmlu_llama_other_tasks
aggregate_metric_list:
  - metric: exact_match
    aggregation: mean
    weight_by_size: True
    filter_list: [strict_match]
metadata:
  version: 1
group: mmlu_llama_social_sciences
group_alias: social sciences
task:
  - mmlu_llama_social_sciences_tasks
aggregate_metric_list:
  - metric: exact_match
    aggregation: mean
    weight_by_size: True
    filter_list: [strict_match]
metadata:
  version: 1
group: mmlu_llama_stem
group_alias: stem
task:
  - mmlu_llama_stem_tasks
aggregate_metric_list:
  - metric: exact_match
    aggregation: mean
    weight_by_size: True
    filter_list: [strict_match]
metadata:
  version: 0
group: mmlu_llama
task:
  - mmlu_llama_stem
  - mmlu_llama_other
  - mmlu_llama_social_sciences
  - mmlu_llama_humanities
aggregate_metric_list:
  - metric: exact_match
    aggregation: mean
    weight_by_size: True
    filter_list: [strict_match]
metadata:
  version: 1
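With `weight_by_size: True`, the group-level `exact_match` is a document-count-weighted mean over its subtasks rather than a plain average. Conceptually (illustrative sketch only, not the harness's aggregation code, with made-up per-subtask scores):

```python
# Hypothetical per-subtask (exact_match, number of test documents) pairs.
subtask_results = {
    "mmlu_llama_abstract_algebra": (0.42, 100),
    "mmlu_llama_anatomy": (0.55, 135),
    "mmlu_llama_astronomy": (0.61, 152),
}

def weighted_group_score(results: dict[str, tuple[float, int]]) -> float:
    """Size-weighted mean, i.e. what `weight_by_size: True` asks for."""
    total_docs = sum(n for _, n in results.values())
    return sum(score * n for score, n in results.values()) / total_docs

print(round(weighted_group_score(subtask_results), 4))
```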
"dataset_name": "abstract_algebra"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_abstract_algebra"
"task_alias": "abstract algebra"
"dataset_name": "anatomy"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_anatomy"
"task_alias": "anatomy"
"dataset_name": "astronomy"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_astronomy"
"task_alias": "astronomy"
"dataset_name": "business_ethics"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_other_tasks"
"task": "mmlu_llama_business_ethics"
"task_alias": "business ethics"
"dataset_name": "clinical_knowledge"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_other_tasks"
"task": "mmlu_llama_clinical_knowledge"
"task_alias": "clinical knowledge"
"dataset_name": "college_biology"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_college_biology"
"task_alias": "college biology"
"dataset_name": "college_chemistry"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_college_chemistry"
"task_alias": "college chemistry"
"dataset_name": "college_computer_science"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_college_computer_science"
"task_alias": "college computer science"
"dataset_name": "college_mathematics"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_college_mathematics"
"task_alias": "college mathematics"
"dataset_name": "college_medicine"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_other_tasks"
"task": "mmlu_llama_college_medicine"
"task_alias": "college medicine"
"dataset_name": "college_physics"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_college_physics"
"task_alias": "college physics"
"dataset_name": "computer_security"
"include": "_continuation_template_yaml"
"tag": "mmlu_llama_stem_tasks"
"task": "mmlu_llama_computer_security"
"task_alias": "computer security"