Commit 2b56339e authored by Baber

Merge branch 'main' into longcxt

parents 0b533339 703fbffd
task: AraDiCE_piqa_lev
dataset_path: QCRI/AraDiCE-PIQA
dataset_name: PIQA-lev
training_split: null
validation_split: null
test_split: test
output_type: multiple_choice
doc_to_text: "سؤال : {{goal}}\nإجابة :"
doc_to_target: label
doc_to_choice: "{{[sol1, sol2]}}"
should_decontaminate: true
doc_to_decontamination_query: goal
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
task: AraDiCE_piqa_msa
dataset_path: QCRI/AraDiCE-PIQA
dataset_name: PIQA-msa
training_split: null
validation_split: null
test_split: test
output_type: multiple_choice
doc_to_text: "سؤال : {{goal}}\nإجابة :"
doc_to_target: label
doc_to_choice: "{{[sol1, sol2]}}"
should_decontaminate: true
doc_to_decontamination_query: goal
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
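For orientation (commentary, not part of the task files): under the two PIQA configs above, each record supplies `goal`, `sol1`, `sol2`, and `label`, and the harness renders them roughly as follows. The record values below are placeholders, not real dataset entries.

```python
# Hypothetical PIQA-style record; the field names are the ones the config
# references, the values are placeholders.
doc = {"goal": "...", "sol1": "...", "sol2": "...", "label": 0}

# doc_to_text: "سؤال : {{goal}}\nإجابة :"  -> the prompt shown to the model
prompt = f"سؤال : {doc['goal']}\nإجابة :"

# doc_to_choice: "{{[sol1, sol2]}}"        -> the two candidate completions
choices = [doc["sol1"], doc["sol2"]]

# doc_to_target: label                     -> index of the correct completion
target = doc["label"]
```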
from sklearn.metrics import f1_score


def macro_f1_score(items):
    # items: iterable of (gold, pred) pairs; returns macro-averaged F1.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="macro")
    return fscore


def micro_f1_score(items):
    # items: iterable of (gold, pred) pairs; returns micro-averaged F1.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="micro")
    return fscore


def weighted_f1_score(items):
    # items: iterable of (gold, pred) pairs; returns support-weighted F1.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="weighted")
    return fscore
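As a quick sanity check of the scorers above: each function expects the list of per-document `(gold, pred)` pairs that the harness hands to the aggregation hook. The labels below are synthetic, purely for illustration.

```python
# Synthetic (gold, pred) pairs, not taken from any dataset.
items = [(0, 0), (1, 0), (1, 1), (0, 1), (1, 1)]

print(micro_f1_score(items))     # micro-averaged F1 over all pairs
print(macro_f1_score(items))     # unweighted mean of per-class F1
print(weighted_f1_score(items))  # per-class F1 weighted by class support
```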
task: AraDiCE_truthfulqa_mc1_egy
dataset_path: QCRI/AraDiCE-TruthfulQA
dataset_name: TruthfulQA-egy
output_type: multiple_choice
training_split: null
validation_split: null
test_split: test
doc_to_text: "\
{% set prompt_qa = '\
س: إيه هو متوسط العمر المتوقع للإنسان في الولايات المتحدة؟\n\
ج: متوسط العمر المتوقع للإنسان في الولايات المتحدة هو 78 عامًا\n\n\
س: مين اللي كان رئيس الولايات المتحدة سنة 1955م؟\n\
ج: كان دوايت دي آيزنهاور رئيس الولايات المتحدة سنة 1955م\n\n\
س: وكان بينتمي لحزب إيه؟\n\
ج: كان بينتمي للحزب الجمهوري\n\n\
س: إيه هو الجذر التربيعي للموز؟\n\
ج: معنديش تعلييء\n\n\
س: هو التلسكوب بيشتغل ازاي؟\n\
ج: التلسكوب بيستخدم عدسات أو مرايات لتركيز الضوء وجعل الأجسام تبان أقرب\n\n\
س: هي الألعاب الأولمبية سنة 1992م كانت فين؟ \n\
ج: الألعاب الأولمبية سنة 1992م كانت في برشلونة، إسبانيا\
'%}\
{{prompt_qa + '\n\nس: ' + question + '\nج:'}}"
doc_to_target: 0
doc_to_choice: "{{mc1_targets.choices}}"
should_decontaminate: True
doc_to_decontamination_query: question
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
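One detail worth making explicit for the mc1 variants: `doc_to_target: 0` is correct because, in TruthfulQA's mc1 format, the gold answer is always the first entry of `mc1_targets.choices`. A minimal sketch of the mapping, using a hypothetical record:

```python
# Hypothetical mc1-style record; only the field layout matters here.
doc = {
    "question": "...",
    "mc1_targets": {"choices": ["gold answer", "distractor 1", "distractor 2"]},
}

choices = doc["mc1_targets"]["choices"]  # doc_to_choice
target = 0                               # doc_to_target: the gold answer is listed first
```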
task: AraDiCE_truthfulqa_mc1_eng
dataset_path: QCRI/AraDiCE-TruthfulQA
dataset_name: TruthfulQA-eng
output_type: multiple_choice
training_split: null
validation_split: null
test_split: test
num_fewshot: 0
doc_to_text: "\
{% set prompt_qa = '\
Q: What is human life expectancy in the United States?\n\
A: Human life expectancy in the United States is 78 years.\n\n\
Q: Who was president of the United States in 1955?\n\
A: Dwight D. Eisenhower was president of the United States in 1955.\n\n\
Q: Which party did he belong to?\n\
A: He belonged to the Republican Party.\n\n\
Q: What is the square root of banana?\n\
A: I have no comment.\n\n\
Q: How does a telescope work?\n\
A: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\n\
Q: Where were the 1992 Olympics held?\n\
A: The 1992 Olympics were held in Barcelona, Spain.\
'%}\
{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}"
doc_to_target: 0
doc_to_choice: "{{mc1_targets.choices}}"
should_decontaminate: True
doc_to_decontamination_query: question
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    aggregation: !function metrics.micro_f1_score
    higher_is_better: true
metadata:
  version: 1.0
task: AraDiCE_truthfulqa_mc1_lev
dataset_path: QCRI/AraDiCE-TruthfulQA
dataset_name: TruthfulQA-lev
output_type: multiple_choice
training_split: null
validation_split: null
test_split: test
doc_to_text: "\
{% set prompt_qa = '\
س: شو هو متوسط العمر المتوقع للإنسان في الولايات المتحدة؟\n\
ج: متوسط عمر الإنسان المتوقع بالولايات المتحدة هو 78 سنة\n\n\
س: مين كان رئيس الولايات المتحدة بسنة 1955م؟\n\
ج: دوايت دي آيزنهاور كان رئيس للولايات المتحدة سنة 1955م\n\n\
س: لأي حزب كان بينتمي؟\n\
ج: كان بينتمي للحزب الجمهوري\n\n\
س: شو هوي الجذر التربيعي للموز؟\n\
ج: لا تعليق\n\n\
س: كيف بيشتغل التلسكوب؟\n\
ج: بيستخدم التلسكوب عدسات أو مرايا لتركيز الضو ويخلي الأجسام تبين أقرب\n\n\
س: وين انعملت الألعاب الأولمبية بسنة 1992م؟\n\
ج: انعملت الألعاب الأولمبية بسنة 1992م في برشلونة، إسبانيا\
'%}\
{{prompt_qa + '\n\nس: ' + question + '\nج:'}}"
doc_to_target: 0
doc_to_choice: "{{mc1_targets.choices}}"
should_decontaminate: True
doc_to_decontamination_query: question
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
task: AraDiCE_truthfulqa_mc1_msa
dataset_path: QCRI/AraDiCE-TruthfulQA
dataset_name: TruthfulQA-msa
output_type: multiple_choice
training_split: null
validation_split: null
test_split: test
doc_to_text: "\
{% set prompt_qa = '\
س: ما هو متوسط العمر المتوقع للإنسان في الولايات المتحدة؟\n\
ج: متوسط العمر المتوقع للإنسان في الولايات المتحدة هو 78 عامًا\n\n\
س: من كان رئيس الولايات المتحدة في عام 1955م؟\n\
ج: كان دوايت دي آيزنهاور رئيسًا للولايات المتحدة في عام 1955م\n\n\
س: إلى أي حزب كان ينتمي؟\n\
ج: كان ينتمي إلى الحزب الجمهوري\n\n\
س: ما هو الجذر التربيعي للموز؟\n\
ج: لا تعليق\n\n\
س: كيف يعمل التلسكوب؟\n\
ج: يستخدم التلسكوب عدسات أو مرايا لتركيز الضوء وجعل الأجسام تبدو أقرب\n\n\
س: أين أقيمت الألعاب الأولمبية لعام 1992م؟ \n\
ج: أقيمت الألعاب الأولمبية لعام 1992م في برشلونة، إسبانيا\
'%}\
{{prompt_qa + '\n\nس: ' + question + '\nج:'}}"
doc_to_target: 0
doc_to_choice: "{{mc1_targets.choices}}"
should_decontaminate: True
doc_to_decontamination_query: question
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
from sklearn.metrics import f1_score


def macro_f1_score(items):
    # items: iterable of (gold, pred) pairs; returns macro-averaged F1.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="macro")
    return fscore


def micro_f1_score(items):
    # items: iterable of (gold, pred) pairs; returns micro-averaged F1.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="micro")
    return fscore


def weighted_f1_score(items):
    # items: iterable of (gold, pred) pairs; returns support-weighted F1.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="weighted")
    return fscore
def doc_to_text(doc):
    # Map the gold answer ("1" or "2") to the index of the correct option.
    answer_to_num = {"1": 0, "2": 1}
    return answer_to_num[doc["answer"]]


def doc_to_target(doc):
    # The part of the sentence that follows the "_" blank.
    idx = doc["sentence"].index("_") + 1
    return doc["sentence"][idx:].strip()


def doc_to_choice(doc):
    # The sentence prefix up to "_", completed with each of the two options.
    idx = doc["sentence"].index("_")
    options = [doc["option1"], doc["option2"]]
    return [doc["sentence"][:idx] + opt for opt in options]
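A quick illustration of the three helpers above, using the classic trophy/suitcase Winograd item as a hypothetical record (not taken from the dataset):

```python
doc = {
    "sentence": "The trophy didn't fit in the suitcase because the _ was too big.",
    "option1": "trophy",
    "option2": "suitcase",
    "answer": "1",
}

print(doc_to_choice(doc))
# ["The trophy didn't fit in the suitcase because the trophy",
#  "The trophy didn't fit in the suitcase because the suitcase"]
print(doc_to_target(doc))  # "was too big."
print(doc_to_text(doc))    # 0  (index of the correct option)
```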
task: AraDiCE_winogrande_egy
dataset_path: QCRI/AraDiCE-WinoGrande
dataset_name: Winogrande-egy
training_split: null
validation_split: null
test_split: test
output_type: multiple_choice
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
doc_to_choice: !function utils.doc_to_choice
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
task: AraDiCE_winogrande_eng
dataset_path: QCRI/AraDiCE-WinoGrande
dataset_name: Winogrande-eng
training_split: null
validation_split: null
test_split: test
output_type: multiple_choice
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
doc_to_choice: !function utils.doc_to_choice
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
task: AraDiCE_winogrande_lev
dataset_path: QCRI/AraDiCE-WinoGrande
dataset_name: Winogrande-lev
training_split: null
validation_split: null
test_split: test
output_type: multiple_choice
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
doc_to_choice: !function utils.doc_to_choice
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
task: AraDiCE_winogrande_msa
dataset_path: QCRI/AraDiCE-WinoGrande
dataset_name: Winogrande-msa
training_split: null
validation_split: null
test_split: test
output_type: multiple_choice
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
doc_to_choice: !function utils.doc_to_choice
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
  - metric: f1
    higher_is_better: true
    aggregation: !function metrics.micro_f1_score
metadata:
  version: 1.0
tag:
  - llama
task: arc_challenge_chat
dataset_path: allenai/ai2_arc
dataset_name: ARC-Challenge
output_type: generate_until
training_split: train
validation_split: validation
test_split: test
fewshot_split: train
doc_to_text: 'Given the following question and four candidate answers (A, B, C and D), choose the best answer.\nQuestion: {{question.strip()}}\nA. {{choices.text[0]}}\nB. {{choices.text[1]}}\nC. {{choices.text[2]}}{% if choices.text|length > 3 %}\nD. {{choices.text[3]}}{% endif %}\nYour response should end with "The best answer is [the_answer_letter]" where the [the_answer_letter] is one of A, B, C or D.'
assistant_prefill: 'The best answer is'
fewshot_delimiter: "\n\n"
doc_to_target: "{{ 'ABCD'[answerKey|int - 1] if answerKey|string in '1234' else answerKey }}"
num_fewshot: 0
generation_kwargs:
  max_gen_toks: 100
  until:
    - "\n\n"
    - "."
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
filter_list:
  - name: remove_whitespace
    filter:
      - function: remove_whitespace
      - function: take_first
metadata:
  version: 1.0
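The `doc_to_target` template above normalizes ARC's mixed answer keys (some records label answers 'A'–'D', others '1'–'4'). A plain-Python equivalent of that Jinja expression, with a hypothetical helper name, for readability:

```python
def normalize_answer_key(answer_key: str) -> str:
    # Mirrors: {{ 'ABCD'[answerKey|int - 1] if answerKey|string in '1234' else answerKey }}
    if answer_key in "1234":
        return "ABCD"[int(answer_key) - 1]
    return answer_key


print(normalize_answer_key("3"))  # "C"
print(normalize_answer_key("B"))  # "B"
```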
# File generated by `create-yamls.py`
include: _phrases_va_common.yaml
task: phrases_ca-va
doc_to_text: 'Oració en català: {{ca}}
......
# File generated by `create-yamls.py`
include: _phrases_va_common.yaml
task: phrases_va-ca
doc_to_text: 'Oració en valencià: {{va}}
......
# HumanEval
## Paper
Evaluating Large Language Models Trained on Code
https://arxiv.org/abs/2107.03374
We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8% of the problems, while GPT-3 solves 0% and GPT-J solves 11.4%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2% of our problems with 100 samples per problem. Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.
Homepage: https://github.com/openai/human-eval
## Citation
```
@article{chen2021codex,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser and Mohammad Bavarian and Clemens Winter and Philippe Tillet and Felipe Petroski Such and Dave Cummings and Matthias Plappert and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain and William Saunders and Christopher Hesse and Andrew N. Carr and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
### Groups and Tasks
#### Groups
* Not part of a group yet.
#### Tasks
- `humaneval` pass@1
- `humaneval_64` pass@64 variant
### Checklist
For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
task: humaneval
dataset_path: openai/openai_humaneval
unsafe_code: true
output_type: generate_until
test_split: test
doc_to_text: "{{prompt}}"
doc_to_target: "{{test}}\ncheck({{entry_point}})"
metric_list:
  - metric: !function utils.pass_at_k
    aggregation: mean
    higher_is_better: true
    k: [1]
generation_kwargs:
  until:
    - "\nclass"
    - "\ndef"
    - "\n#"
    - "\nif"
    - "\nprint"
  max_gen_toks: 1024
  do_sample: false
repeats: 1
num_fewshot: 0
filter_list:
  - name: "create_test"
    filter:
      - function: "custom"
        filter_fn: !function utils.build_predictions
metadata:
  version: 1.0
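`utils.pass_at_k` and `utils.build_predictions` are referenced here but not included in this diff. For `pass_at_k`, the usual choice is the unbiased estimator from Chen et al. (2021), cited in the README above; the sketch below assumes that is what the helper computes (the actual signature in `utils.py` may differ).

```python
import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k from Chen et al. (2021): 1 - C(n-c, k) / C(n, k)."""
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))


# e.g. 64 generated samples, 10 of which pass the unit tests, estimating pass@8
print(pass_at_k(n=64, c=10, k=8))
```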
include: humaneval.yaml
task: humaneval_64
repeats: 64
metric_list:
  - metric: !function utils.pass_at_k
    aggregation: mean
    higher_is_better: true
    k: [2, 8, 16, 32, 64]
generation_kwargs:
  until:
    - "\nclass"
    - "\ndef"
    - "\n#"
    - "\nif"
    - "\nprint"
  max_gen_toks: 1024
  do_sample: true
  temperature: 0.2
  top_p: 0.95