Commit 02e841ce authored by lintangsutawika's avatar lintangsutawika
Browse files

Merge branch 'main' of https://github.com/EleutherAI/lm-evaluation-harness into t5v2-alt-plus

parents 90ad5db7 e74ec966
......@@ -18,3 +18,5 @@ metric_list:
ignore_punctuation: false
metadata:
version: 2.0
dataset_kwargs:
trust_remote_code: true
......@@ -18,3 +18,5 @@ metric_list:
ignore_punctuation: false
metadata:
version: 2.0
dataset_kwargs:
trust_remote_code: true
......@@ -18,3 +18,5 @@ metric_list:
ignore_punctuation: false
metadata:
version: 2.0
dataset_kwargs:
trust_remote_code: true
......@@ -18,3 +18,5 @@ metric_list:
ignore_punctuation: false
metadata:
version: 2.0
dataset_kwargs:
trust_remote_code: true
......@@ -16,3 +16,5 @@ metric_list:
- metric: bits_per_byte
metadata:
version: 2.0
dataset_kwargs:
trust_remote_code: true
# WMDP
### Paper
Title: `The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning`
Abstract: `https://arxiv.org/abs/2403.03218`
`The Weapons of Mass Destruction Proxy (WMDP) benchmark is a dataset of 4,157 multiple-choice questions surrounding hazardous knowledge in biosecurity cybersecurity, and chemical security. WMDP serves as both a proxy evaluation for hazardous knowledge in large language models (LLMs) and a benchmark for unlearning methods to remove such knowledge.`
Homepage: https://wmdp.ai
### Citation
```
@misc{li2024wmdp,
title={The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning},
author={Nathaniel Li and Alexander Pan and Anjali Gopal and Summer Yue and Daniel Berrios and Alice Gatti and Justin D. Li and Ann-Kathrin Dombrowski and Shashwat Goel and Long Phan and Gabriel Mukobi and Nathan Helm-Burger and Rassin Lababidi and Lennart Justen and Andrew B. Liu and Michael Chen and Isabelle Barrass and Oliver Zhang and Xiaoyuan Zhu and Rishub Tamirisa and Bhrugu Bharathi and Adam Khoja and Zhenqi Zhao and Ariel Herbert-Voss and Cort B. Breuer and Andy Zou and Mantas Mazeika and Zifan Wang and Palash Oswal and Weiran Liu and Adam A. Hunt and Justin Tienken-Harder and Kevin Y. Shih and Kemper Talley and John Guan and Russell Kaplan and Ian Steneker and David Campbell and Brad Jokubaitis and Alex Levinson and Jean Wang and William Qian and Kallol Krishna Karmakar and Steven Basart and Stephen Fitz and Mindy Levine and Ponnurangam Kumaraguru and Uday Tupakula and Vijay Varadharajan and Yan Shoshitaishvili and Jimmy Ba and Kevin M. Esvelt and Alexandr Wang and Dan Hendrycks},
year={2024},
eprint={2403.03218},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
### Groups and Tasks
#### Groups
* `wmdp`: All 4,157 multiple-choice questions in biosecurity, cybersecurity, and chemical security
#### Tasks
* `wmdp_bio`: 1,520 multiple-choice questions in biosecurity
* `wmdp_cyber`: 2,225 multiple-choice questions in cybersecurity
* `wmdp_chemistry`: 412 multiple-choice questions in chemical security
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
dataset_path: cais/wmdp
group: wmdp
test_split: test
training_split: null
validation_split: null
num_fewshot: 0
output_type: multiple_choice
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
metadata:
version: 0
"task": "wmdp_bio"
"dataset_name": "wmdp-bio"
"include": "_default_template_yaml"
"description": "The following are multiple choice questions (with answers) about biology.\n\n"
"task": "wmdp_chem"
"dataset_name": "wmdp-chem"
"include": "_default_template_yaml"
"description": "The following are multiple choice questions (with answers) about chemistry.\n\n"
"task": "wmdp_cyber"
"dataset_name": "wmdp-cyber"
"include": "_default_template_yaml"
"description": "The following are multiple choice questions (with answers) about cybersecurity.\n\n"
......@@ -21,7 +21,7 @@ license = { "text" = "MIT" }
dependencies = [
"accelerate>=0.21.0",
"evaluate",
"datasets>=2.14.0",
"datasets>=2.16.0",
"evaluate>=0.4.0",
"jsonlines",
"numexpr",
......@@ -38,6 +38,7 @@ dependencies = [
"zstandard",
"dill",
"word2number",
"more_itertools",
]
[tool.setuptools.packages.find]
......@@ -70,7 +71,7 @@ optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
vllm = ["vllm<=0.2.5"]
vllm = ["vllm==0.3.2"]
zeno = ["pandas", "zeno-client"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
all = [
......@@ -94,9 +95,9 @@ all = [
[tool.ruff.lint]
extend-select = ["I"]
[tool.ruff.isort]
[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["lm_eval"]
[tool.ruff.extend-per-file-ignores]
[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401","F402","F403"]
......@@ -75,7 +75,7 @@ std::vector<std::string> clean_ngram(std::string const &input,
gram_lengths.erase(gram_lengths.begin());
gram_lengths.push_back(0);
// Otherwise, continute building
// Otherwise, continue building
} else {
current_ngram += ' ';
gram_lengths.push_back(0);
......@@ -165,7 +165,7 @@ clean_ngram_with_indices(std::string const &input, std::string const &ignore,
gram_start_indices.erase(gram_start_indices.begin());
gram_start_indices.push_back(i + 1);
// Otherwise, continute building
// Otherwise, continue building
} else {
current_ngram += ' ';
gram_lengths.push_back(0);
......
......@@ -3,7 +3,7 @@ import random
import transformers
from lm_eval import evaluator, tasks
from lm_eval.base import LM
from lm_eval.api.model import LM
class DryrunLM(LM):
......@@ -53,13 +53,12 @@ def main():
values = []
for taskname in task_list.split(","):
lm.tokencost = 0
evaluator.evaluate(
evaluator.simple_evaluate(
lm=lm,
task_dict={taskname: tasks.get_task(taskname)()},
num_fewshot=0,
limit=None,
bootstrap_iters=10,
description_dict=None,
)
print(taskname, lm.tokencost)
......
......@@ -120,110 +120,3 @@ class TestNewTasks:
requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
# assert all(isinstance(doc, list) for doc in requests)
assert len(requests) == limit if limit else True
# def test_create_choices(task_class):
# arr = list(islice(task_class().test_docs(), 1))
# choices = task_class().create_choices(arr[0])
# assert choices is not None
# checking if number of choices is correct
# @pytest.mark.parametrize("taskname,task_class", tasks.TASK_REGISTRY.items())
# def test_basic_interface(taskname, task_class):
# print("Evaluating task", taskname)
# task = task_class()
#
# assert task.has_training_docs() in [True, False]
# assert task.has_validation_docs() in [True, False]
# assert task.has_test_docs() in [True, False]
#
# assert isinstance(task.aggregation(), dict)
# assert isinstance(task.higher_is_better(), dict)
# assert task.aggregation().keys() == task.higher_is_better().keys()
#
# for v in task.higher_is_better().values():
# assert v in [True, False]
#
# assert isinstance(task.VERSION, int)
#
# # test deterministic docs
# # (don't test train because it's slow)
#
# task2 = task_class()
#
# limit = None
#
# if taskname in ["triviaqa"] or taskname.startswith("pile_"):
# limit = 10000
# if task.has_validation_docs():
# arr = list(islice(task.validation_docs(), limit))
# arr2 = list(islice(task2.validation_docs(), limit))
#
# assert arr == arr2
#
# reqs = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
# reqs2 = [task2.construct_requests(doc, task2.doc_to_text(doc)) for doc in arr2]
#
# assert reqs == reqs2
#
# if task.has_test_docs():
# arr = list(islice(task.test_docs(), limit))
# arr2 = list(islice(task2.test_docs(), limit))
#
# assert arr == arr2
#
# reqs = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
# reqs2 = [task2.construct_requests(doc, task2.doc_to_text(doc)) for doc in arr2]
#
# assert reqs == reqs2
#
# if task.has_training_docs():
# arr = list(islice(task.training_docs(), limit))
# arr2 = list(islice(task2.training_docs(), limit))
#
# assert arr == arr2
#
# reqs = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
# reqs2 = [task2.construct_requests(doc, task2.doc_to_text(doc)) for doc in arr2]
#
# assert reqs == reqs2
#
#
# @pytest.mark.parametrize("taskname,task_class", tasks.TASK_REGISTRY.items())
# def test_documents_and_requests(taskname, task_class):
# print("Evaluating task", taskname)
# task = task_class()
# fns = []
# if task.has_training_docs():
# fns.append(task.training_docs)
# if task.has_validation_docs():
# fns.append(task.validation_docs)
# # test doc might not have labels
# # if task.has_test_docs(): fns.append(task.test_docs)
#
# for fn in fns:
# # print(list(islice(fn(), 10)))
# for doc in islice(fn(), 10):
#
# txt = task.doc_to_text(doc)
# tgt = task.doc_to_target(doc)
#
# assert isinstance(txt, str)
# assert isinstance(tgt, str)
#
# # space convention
# # allow txt to have length 0 for perplexity-like tasks since the model tacks an <|endoftext|> on
# if len(txt) != 0:
# assert txt[-1] != " "
# assert tgt[0] == " " or txt[-1] == "\n"
#
# reqs = task.construct_requests(doc, txt)
#
# # construct_requests can return just one request
# if not isinstance(reqs, (list, tuple)):
# reqs = [reqs]
#
# # todo: mock lm after refactoring evaluator.py to not be a mess
# # for req in reqs:
# # assert isinstance(req, base.Request)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment