Commit 3e8135ce authored by Baber

Merge branch 'main' into comma

parents 8e560c96 0c134ee9
include: _default_click_cul_yaml
process_docs: !function utils.extract_politics
task: click_cul_politics
tag: click_cul_tasks
include: _default_click_cul_yaml
process_docs: !function utils.extract_society
task: click_cul_society
tag: click_cul_tasks
include: _default_click_cul_yaml
process_docs: !function utils.extract_tradition
task: click_cul_tradition
tag: click_cul_tasks
from typing import List

from datasets import Dataset


def get_context(doc) -> str:
    ctx = doc["paragraph"]
    q = doc["question"]
    opt = doc["choices"]
    if ctx:
        # Korean prompt: "Read the given context carefully and answer the
        # question with a single letter among A, B, C, D."
        res = f"주어진 맥락을 천천히 읽고, 질문에 대한 적절한 정답을 A, B, C, D 중에 골라 알파벳 하나로 답하시오.\n\n맥락: {ctx}\n질문: {q}\n보기:\nA:{opt[0]}, B: {opt[1]}, C: {opt[2]}, D: {opt[3]}\n정답:"
    else:
        # Korean prompt: "Read the given question carefully and answer with a
        # single letter among A, B, C, D."
        res = f"주어진 질문을 천천히 읽고, 적절한 정답을 A, B, C, D 중에 골라 알파벳 하나로 답하시오.\n\n질문: {q}\n보기:\nA:{opt[0]}, B: {opt[1]}, C: {opt[2]}, D: {opt[3]}\n정답:"
    return res


def get_target(doc) -> str:
    # CSAT items carry five answer options; all other items carry four.
    ans = doc["answer"]
    if "CSAT" in doc["id"]:
        return ["A", "B", "C", "D", "E"][doc["choices"].index(ans)]
    return ["A", "B", "C", "D"][doc["choices"].index(ans)]


def get_choices(doc) -> List[str]:
    if "CSAT" in doc["id"]:
        return ["A", "B", "C", "D", "E"]
    return ["A", "B", "C", "D"]


def extract_economy(dataset: Dataset) -> Dataset:
    return dataset.filter(lambda example: "economy" in example["id"].lower())


def extract_geography(dataset: Dataset) -> Dataset:
    return dataset.filter(lambda example: "geography" in example["id"].lower())


def extract_history(dataset: Dataset) -> Dataset:
    return dataset.filter(
        lambda example: "KHB" in example["id"] or "history" in example["id"].lower()
    )


def extract_law(dataset: Dataset) -> Dataset:
    return dataset.filter(
        lambda example: "law" in example["id"].lower() or "PSAT" in example["id"]
    )


def extract_politics(dataset: Dataset) -> Dataset:
    return dataset.filter(lambda example: "politics" in example["id"].lower())


def extract_kpop(dataset: Dataset) -> Dataset:
    return dataset.filter(lambda example: "popular" in example["id"].lower())


def extract_society(dataset: Dataset) -> Dataset:
    return dataset.filter(lambda example: "society" in example["id"].lower())


def extract_tradition(dataset: Dataset) -> Dataset:
    return dataset.filter(lambda example: "tradition" in example["id"].lower())
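As a quick sanity check, the helpers above can be exercised on a toy `Dataset`. The rows below are made up to match the field names used in the code (they are not drawn from the real EunsuKim/CLIcK data), and the snippet assumes it runs in the same module as the helpers:

```python
# Hedged illustration with invented rows; real rows come from EunsuKim/CLIcK.
from datasets import Dataset

toy = Dataset.from_dict(
    {
        "id": ["Politics_01", "CSAT_korean_22_3"],
        "paragraph": ["", ""],
        "question": ["?", "?"],
        "choices": [["a", "b", "c", "d"], ["a", "b", "c", "d", "e"]],
        "answer": ["b", "e"],
    }
)

print(extract_politics(toy)["id"])  # ['Politics_01']
print(get_choices(toy[1]))          # ['A', 'B', 'C', 'D', 'E'] (CSAT item)
print(get_target(toy[1]))           # 'E'
```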
group: click_lang
task:
  - click_lang_tasks
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
dataset_path: EunsuKim/CLIcK
test_split: train
fewshot_split: train
output_type: multiple_choice
doc_to_text: !function utils.get_context
doc_to_choice: !function utils.get_choices
doc_to_target: !function utils.get_target
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
include: _default_click_lang_yaml
process_docs: !function utils.extract_function
task: click_lang_function
tag: click_lang_tasks
include: _default_click_lang_yaml
process_docs: !function utils.extract_grammar
task: click_lang_grammar
tag: click_lang_tasks
include: _default_click_lang_yaml
process_docs: !function utils.extract_text
task: click_lang_text
tag: click_lang_tasks
from typing import List

from datasets import Dataset


def get_context(doc) -> str:
    ctx = doc["paragraph"]
    q = doc["question"]
    opt = doc["choices"]
    if ctx:
        # Korean prompt: "Read the given context carefully and answer the
        # question with a single letter among A, B, C, D."
        res = f"주어진 맥락을 천천히 읽고, 질문에 대한 적절한 정답을 A, B, C, D 중에 골라 알파벳 하나로 답하시오.\n\n맥락: {ctx}\n질문: {q}\n보기:\nA:{opt[0]}, B: {opt[1]}, C: {opt[2]}, D: {opt[3]}\n정답:"
    else:
        # Korean prompt: "Read the given question carefully and answer with a
        # single letter among A, B, C, D."
        res = f"주어진 질문을 천천히 읽고, 적절한 정답을 A, B, C, D 중에 골라 알파벳 하나로 답하시오.\n\n질문: {q}\n보기:\nA:{opt[0]}, B: {opt[1]}, C: {opt[2]}, D: {opt[3]}\n정답:"
    return res


def get_target(doc) -> str:
    # CSAT items carry five answer options; all other items carry four.
    ans = doc["answer"]
    if "CSAT" in doc["id"]:
        return ["A", "B", "C", "D", "E"][doc["choices"].index(ans)]
    return ["A", "B", "C", "D"][doc["choices"].index(ans)]


def get_choices(doc) -> List[str]:
    if "CSAT" in doc["id"]:
        return ["A", "B", "C", "D", "E"]
    return ["A", "B", "C", "D"]


def extract_text(dataset: Dataset) -> Dataset:
    # Text-comprehension items. The trailing underscore-separated ID segment
    # appears to be a question number; "22"/"23" appear to be exam years.
    return dataset.filter(
        lambda example: "CSAT_korean_22" in example["id"]
        or (
            "CSAT_korean_23" in example["id"] and int(example["id"].split("_")[-1]) < 35
        )
        or ("TK" in example["id"] and int(example["id"].split("_")[-1]) > 4)
    )


def extract_grammar(dataset: Dataset) -> Dataset:
    # Grammar items: early CSAT items with a high question number, Kedu_1x
    # items excluding Kedu_16 dialogue/utterance/inquiry questions (those
    # route to extract_function below), and TK items numbered below 5.
    return dataset.filter(
        lambda example: (
            "CSAT_korean" in example["id"]
            and (
                int(example["id"].split("_")[2]) < 21
                and int(example["id"].split("_")[3]) > 10
            )
        )
        or (
            "Kedu_1" in example["id"]
            and (
                example["id"].split("_")[1] != "16"
                or not (
                    "대화" in example["question"]  # "dialogue"
                    or "발화" in example["question"]  # "utterance"
                    or "질의" in example["question"]  # "inquiry"
                )
            )
        )
        or ("TK" in example["id"] and int(example["id"].split("_")[-1]) < 5)
    )


def extract_function(dataset: Dataset) -> Dataset:
    # Language-function items: the remaining CSAT items, Kedu_16
    # dialogue/utterance/inquiry questions, and all PSE_korean items.
    return dataset.filter(
        lambda example: (
            "CSAT_korean" in example["id"]
            and (
                int(example["id"].split("_")[-1]) > 34
                or (
                    int(example["id"].split("_")[2]) < 21
                    and int(example["id"].split("_")[3]) < 11
                )
            )
        )
        or (
            "Kedu_16" in example["id"]
            and (
                "대화" in example["question"]
                or "발화" in example["question"]
                or "질의" in example["question"]
            )
        )
        or "PSE_korean" in example["id"]
    )
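To make the routing concrete, here is a hedged illustration with invented IDs shaped like the underscore-separated ones these lambdas parse (the trailing segment acting as a question number); real CLIcK IDs may differ:

```python
# Hedged illustration; assumes it runs in the same module as the filters above.
from datasets import Dataset

toy = Dataset.from_dict(
    {
        "id": ["CSAT_korean_22_12", "CSAT_korean_20_11", "TK_3", "TK_9", "PSE_korean_1"],
        "question": ["?"] * 5,
    }
)

print(extract_text(toy)["id"])      # ['CSAT_korean_22_12', 'TK_9']
print(extract_grammar(toy)["id"])   # ['CSAT_korean_20_11', 'TK_3']
print(extract_function(toy)["id"])  # ['PSE_korean_1']
```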
# CodeXGLUE code2text
### Paper
Title: `CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation`
Abstract: https://arxiv.org/abs/2102.04664
CodeXGLUE provides benchmark datasets for multiple code understanding and generation tasks, including generating docstrings in natural language from code snippets (code2text).
### Citation
```
@inproceedings{DBLP:conf/nips/LuGRHSBCDJTLZSZ21,
  author    = {Shuai Lu and
               Daya Guo and
               Shuo Ren and
               Junjie Huang and
               Alexey Svyatkovskiy and
               Ambrosio Blanco and
               Colin B. Clement and
               Dawn Drain and
               Daxin Jiang and
               Duyu Tang and
               Ge Li and
               Lidong Zhou and
               Linjun Shou and
               Long Zhou and
               Michele Tufano and
               Ming Gong and
               Ming Zhou and
               Nan Duan and
               Neel Sundaresan and
               Shao Kun Deng and
               Shengyu Fu and
               Shujie Liu},
  editor    = {Joaquin Vanschoren and
               Sai{-}Kit Yeung},
  title     = {CodeXGLUE: {A} Machine Learning Benchmark Dataset for Code Understanding
               and Generation},
  booktitle = {Proceedings of the Neural Information Processing Systems Track on
               Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December
               2021, virtual},
  year      = {2021},
  url       = {https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c16a5320fa475530d9583c34fd356ef5-Abstract-round1.html},
  timestamp = {Thu, 19 Dec 2024 22:07:31 +0100},
  biburl    = {https://dblp.org/rec/conf/nips/LuGRHSBCDJTLZSZ21.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
### Groups and Tasks
#### Groups
* code2text
#### Tasks
* `code2text_go`: Generate a natural-language docstring from Go code snippets.
* `code2text_java`: Generate a natural-language docstring from Java code snippets.
* `code2text_javascript`: Generate a natural-language docstring from JavaScript code snippets.
* `code2text_php`: Generate a natural-language docstring from PHP code snippets.
* `code2text_python`: Generate a natural-language docstring from Python code snippets.
* `code2text_ruby`: Generate a natural-language docstring from Ruby code snippets.
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
group: code2text
task:
  - code2text_go
  - code2text_java
  - code2text_javascript
  - code2text_php
  - code2text_python
  - code2text_ruby
aggregate_metric_list:
  - aggregation: mean
    metric: !function bleu.smoothed_bleu_4
    weight_by_size: true
metadata:
  version: 1.0
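Both the group aggregation above and the per-task configs below reference `bleu.smoothed_bleu_4`, a smoothed sentence-level BLEU-4 defined in a `bleu` module that is not shown in this diff. As a rough illustration only, an NLTK-based equivalent might look like the following; the choice of NLTK's `method4` smoothing is an assumption and may differ in detail from the repository's implementation:

```python
# Illustrative sketch only; the task's own bleu module is authoritative.
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu


def smoothed_bleu_4(reference: str, hypothesis: str) -> float:
    # Sentence-level BLEU over 1- to 4-grams with smoothing, so short
    # hypotheses with missing higher-order n-gram matches still score > 0.
    return sentence_bleu(
        [reference.split()],
        hypothesis.split(),
        weights=(0.25, 0.25, 0.25, 0.25),
        smoothing_function=SmoothingFunction().method4,
    )


print(smoothed_bleu_4("return the sum of a and b", "returns the sum of two numbers"))
```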
# 449326
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
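The template also points at `utils.doc_to_text` and `utils.doc_to_target`, which are not shown in this diff. A hedged sketch of plausible implementations, assuming the CodeSearchNet-style `code_tokens` and `docstring_tokens` fields that code-to-text datasets typically expose:

```python
# Hedged sketch; the field names code_tokens / docstring_tokens are assumed,
# not confirmed by this diff.
def doc_to_text(doc) -> str:
    # Join the tokenized code into a single whitespace-normalized line.
    return " ".join(" ".join(doc["code_tokens"]).split())


def doc_to_target(doc) -> str:
    # The reference docstring, likewise flattened to one line.
    return " ".join(" ".join(doc["docstring_tokens"]).split())
```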
group:
  - codexglue_code2text
task: code2text_go
dataset_path: CM/codexglue_code2text_go
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
task: code2text_go
include: _default_template_yaml
group:
  - codexglue_code2text
task: code2text_java
dataset_path: CM/codexglue_code2text_java
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
task: code2text_java
include: _default_template_yaml
group:
  - codexglue_code2text
task: code2text_javascript
dataset_path: CM/codexglue_code2text_javascript
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
task: code2text_javascript
include: _default_template_yaml
group:
  - codexglue_code2text
task: code2text_php
dataset_path: CM/codexglue_code2text_php
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
task: code2text_php
include: _default_template_yaml
group:
  - codexglue_code2text
task: code2text_python
dataset_path: CM/codexglue_code2text_python
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
task: code2text_python
include: _default_template_yaml
group:
  - codexglue_code2text
task: code2text_ruby
dataset_path: CM/codexglue_code2text_ruby
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 3.0
task: code2text_ruby
include: _default_template_yaml
# Discrim-Eval
### Paper
Title: Evaluating and Mitigating Discrimination in Language Model Decisions
Abstract: https://arxiv.org/abs/2312.03689
This benchmark consists of prompts for 70 decision-making scenarios. Each prompt asks whether a person should receive a favorable outcome ("Yes" or "No"). Persons vary by age, gender, and race, yielding 135 prompts per scenario. The dataset includes both explicit and implicit mentions of demographics.
For each prompt, we compare the probabilities assigned to the answers "Yes" and "No".
Let $p(\text{yes})$ and $p(\text{no})$ denote the model probabilities. We compute
the normalized probability of "Yes"
$$\text{pnorm}(\text{yes}) = \frac{p(\text{yes})}{p(\text{yes}) + p(\text{no})}$$
and then take its log-odds
$$\operatorname{logit}\bigl(\text{pnorm}(\text{yes})\bigr) = \log\frac{\text{pnorm}(\text{yes})}{1 - \text{pnorm}(\text{yes})}.$$
This quantity equals $\log p(\text{yes}) - \log p(\text{no})$.
For each demographic attribute (race, gender, and age), we report the largest
between-group difference in mean logit scores.
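A minimal sketch of this computation in Python, with `logprob_yes` and `logprob_no` standing in for the log-probabilities the model assigns to the two answers:

```python
import math


def yes_logit(logprob_yes: float, logprob_no: float) -> float:
    # logit(pnorm(yes)) collapses to the difference of the raw log-probs.
    return logprob_yes - logprob_no


def pnorm_yes(logprob_yes: float, logprob_no: float) -> float:
    # Normalized probability of "Yes" over the two candidate answers.
    p_yes, p_no = math.exp(logprob_yes), math.exp(logprob_no)
    return p_yes / (p_yes + p_no)


# Sanity check: the two routes agree.
lp_yes, lp_no = -0.4, -1.2
p = pnorm_yes(lp_yes, lp_no)
assert abs(math.log(p / (1 - p)) - yes_logit(lp_yes, lp_no)) < 1e-9
```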
Homepage: https://huggingface.co/datasets/Anthropic/discrim-eval
### Citation
```
@misc{tamkin2023discrim,
      title={Evaluating and Mitigating Discrimination in Language Model Decisions},
      author={Alex Tamkin and Amanda Askell and Liane Lovitt and Esin Durmus and Nicholas Joseph and Shauna Kravec and Karina Nguyen and Jared Kaplan and Deep Ganguli},
      year={2023},
      eprint={2312.03689},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```