Commit 8d59330b authored by lintangsutawika

resolved merge conflict

parents 110e5a28 d4a913c4
task: pile_10k
dataset_path: NeelNanda/pile-10k
dataset_name: null
output_type: loglikelihood_rolling
test_split: train
doc_to_text: ""
doc_to_target: "text"
metric_list:
  - metric: word_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: byte_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: bits_per_byte
    aggregation: bits_per_byte
    higher_is_better: false
metadata:
  version: 1.0
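For reference, the three metrics above are computed from per-document rolling log-likelihoods; the sketch below shows roughly how the `weighted_perplexity` and `bits_per_byte` aggregations combine them (an illustrative re-implementation, not the harness's own code):

```python
import math
from typing import List, Tuple

# Each item pairs a document's total log-likelihood with a weight:
# the word count for word_perplexity, or the UTF-8 byte count for
# byte_perplexity and bits_per_byte.


def weighted_perplexity(items: List[Tuple[float, int]]) -> float:
    total_ll = sum(ll for ll, _ in items)
    total_weight = sum(w for _, w in items)
    return math.exp(-total_ll / total_weight)


def bits_per_byte(items: List[Tuple[float, int]]) -> float:
    total_ll = sum(ll for ll, _ in items)
    total_bytes = sum(w for _, w in items)
    return -total_ll / (total_bytes * math.log(2))
```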
group: scrolls
task:
  - task: scrolls_qasper
    class: !function task.Qasper
  - task: scrolls_quality
    class: !function task.QuALITY
  - task: scrolls_narrativeqa
    class: !function task.NarrativeQA
  - task: scrolls_contractnli
    class: !function task.ContractNLI
  - task: scrolls_govreport
    class: !function task.GovReport
  - task: scrolls_summscreenfd
    class: !function task.SummScreenFD
  - task: scrolls_qmsum
    class: !function task.QMSum
group: scrolls
task: scrolls_contractnli
class: !function task.ContractNLI

group: scrolls
task: scrolls_govreport
class: !function task.GovReport

group: scrolls
task: scrolls_narrativeqa
class: !function task.NarrativeQA

group: scrolls
task: scrolls_qasper
class: !function task.Qasper

group: scrolls
task: scrolls_qmsum
class: !function task.QMSum

group: scrolls
task: scrolls_quality
class: !function task.QuALITY

group: scrolls
task: scrolls_summscreenfd
class: !function task.SummScreenFD
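Each `!function` entry above binds a subtask to a `ConfigurableTask` subclass defined in the accompanying `task.py`. As a quick sanity check that the group and its subtasks are registered, something like the following should work (a sketch assuming the `TaskManager` API of recent harness versions):

```python
from lm_eval.tasks import TaskManager

# list registered task/group names; the scrolls subtasks defined above
# should appear alongside the "scrolls" group itself
tm = TaskManager()
print(sorted(name for name in tm.all_tasks if name.startswith("scrolls")))
```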
# Squad-completion
### Paper
Title: Simple Linear Attention Language Models Balance The Recall-Throughput Tradeoff
A variant of the SQuAD question answering task, as implemented by Based. See [the squadv2 task README](https://github.com/EleutherAI/lm-evaluation-harness/lm_eval/tasks/squadv2/README.md) for more info.
Homepage: https://github.com/HazyResearch/based-evaluation-harness
### Citation
```
@misc{arora2024simple,
title={Simple linear attention language models balance the recall-throughput tradeoff},
author={Simran Arora and Sabri Eyuboglu and Michael Zhang and Aman Timalsina and Silas Alberti and Dylan Zinsley and James Zou and Atri Rudra and Christopher Ré},
year={2024},
eprint={2402.18668},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@misc{rajpurkar2018know,
title={Know What You Don't Know: Unanswerable Questions for SQuAD},
author={Pranav Rajpurkar and Robin Jia and Percy Liang},
year={2018},
eprint={1806.03822},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
### Groups and Tasks
#### Tasks
* `squad_completion`: the SQuAD task as implemented in the paper "Simple linear attention language models balance the recall-throughput tradeoff". Designed for zero-shot evaluation of small LMs.
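A rough usage sketch (the model, dtype, and limit below are placeholders for a quick local run, not part of the task definition):

```python
from lm_eval import evaluator

# zero-shot, as intended for this task
results = evaluator.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m,dtype=float32",
    tasks=["squad_completion"],
    num_fewshot=0,
    limit=16,  # small limit just to smoke-test the setup
)
print(results["results"]["squad_completion"])
```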
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
task: squad_completion
class: !function task.SQUADCompletion
"""
"""
import re
from typing import List

import numpy as np

from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask


class SQUADCompletion(ConfigurableTask):
    VERSION = 0
    DATASET_PATH = "hazyresearch/based-squad"
    DATASET_NAME = "default"

    def __init__(self):
        super().__init__(config={"metadata": {"version": self.VERSION}})

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def validation_docs(self):
        return self.dataset["validation"]

    def doc_to_text(self, doc):
        return doc["text"]

    def doc_to_target(self, doc):
        return doc["value"]

    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        return [
            Instance(
                request_type="generate_until",
                doc=doc,
                arguments=(ctx, {"until": ["\n"], "max_gen_toks": 48}),
                idx=0,
                **kwargs,
            )
        ]

    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        # continuation, (logprob_unanswerable, _) = results
        continuation = results
        return {"contains": contains_score(continuation[0], [doc["value"]])}

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        return {
            "contains": np.mean,  # whether the generation contains the gold answer
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        return {
            "contains": True,  # whether the generation contains the gold answer
        }


def contains_score(prediction: str, labels: List[str]):
    return max(
        int(bool(re.search(re.compile(re.escape(label), re.IGNORECASE), prediction)))
        for label in labels
    )
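To make the scoring concrete: `contains_score` returns 1 if any gold label occurs (case-insensitively, regex-escaped) as a substring of the generation, and 0 otherwise. A tiny illustration with made-up strings:

```python
# hypothetical generations, purely for illustration
assert contains_score("The film was released in 2001.", ["2001"]) == 1
assert contains_score("ANSWER: 2001", ["2001"]) == 1
assert contains_score("I do not know.", ["2001"]) == 0
```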
# SWDE
### Paper
Title: Language Models Enable Simple Systems For Generating Structured Views Of Heterogeneous Data Lakes
Abstract: A long standing goal of the data management community is to develop general, automated systems
that ingest semi-structured documents and output queryable tables without human effort or domain
specific customization. Given the sheer variety of potential documents, state-of-the-art systems make
simplifying assumptions and use domain specific training. In this work, we ask whether we can
maintain generality by using large language models (LLMs). LLMs, which are pretrained on broad
data, can perform diverse downstream tasks simply conditioned on natural language task descriptions.
We propose and evaluate EVAPORATE, a simple, prototype system powered by LLMs. We identify
two fundamentally different strategies for implementing this system: prompt the LLM to directly
extract values from documents or prompt the LLM to synthesize code that performs the extraction.
Our evaluations show a cost-quality tradeoff between these two approaches. Code synthesis is cheap,
but far less accurate than directly processing each document with the LLM. To improve quality while
maintaining low cost, we propose an extended code synthesis implementation, EVAPORATE-CODE+,
which achieves better quality than direct extraction. Our key insight is to generate many candidate
functions and ensemble their extractions using weak supervision. EVAPORATE-CODE+ not only
outperforms the state-of-the-art systems, but does so using a sublinear pass over the documents with
the LLM. This equates to a 110× reduction in the number of tokens the LLM needs to process,
averaged across 16 real-world evaluation settings of 10k documents each.
A task for LMs to perform Information Extraction, as implemented by Based.
Homepage: https://github.com/HazyResearch/based-evaluation-harness
Description:
> SWDE (Information Extraction). The task in the SWDE benchmark is to extract semi-structured relations from raw HTML websites. For example, given an IMDb page for a movie (e.g. Harry Potter and the Sorcerer’s Stone) and a relation key (e.g. release date), the model must extract the correct relation value (e.g. 2001). The SWDE benchmark was originally curated by Lockard et al. for the task of open information extraction from the semi-structured web. Because we are evaluating the zero-shot capabilities of relatively small language models, we adapt the task to make it slightly easier. Our task setup is similar to that used in Arora et al.
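To get a feel for the documents, the underlying dataset (the `hazyresearch/based-swde-v2` path used by the task class below) can be inspected directly; a minimal sketch, assuming the fields exposed by that dataset:

```python
from datasets import load_dataset

# the task reads the "validation" split, prompting with the "text" field
# and scoring against the "value" field
ds = load_dataset("hazyresearch/based-swde-v2", "default", split="validation")
doc = ds[0]
print(doc["text"][-500:])   # tail of the serialized page / prompt
print("gold value:", doc["value"])
```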
### Citation
```
@misc{arora2024simple,
title={Simple linear attention language models balance the recall-throughput tradeoff},
author={Simran Arora and Sabri Eyuboglu and Michael Zhang and Aman Timalsina and Silas Alberti and Dylan Zinsley and James Zou and Atri Rudra and Christopher Ré},
year={2024},
eprint={2402.18668},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@misc{arora2023language,
title={Language Models Enable Simple Systems for Generating Structured Views of Heterogeneous Data Lakes},
author={Simran Arora and Brandon Yang and Sabri Eyuboglu and Avanika Narayan and Andrew Hojel and Immanuel Trummer and Christopher Ré},
year={2023},
eprint={2304.09433},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@inproceedings{lockard-etal-2019-openceres,
title = "{O}pen{C}eres: {W}hen Open Information Extraction Meets the Semi-Structured Web",
author = "Lockard, Colin and
Shiralkar, Prashant and
Dong, Xin Luna",
editor = "Burstein, Jill and
Doran, Christy and
Solorio, Thamar",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N19-1309",
doi = "10.18653/v1/N19-1309",
pages = "3047--3056",
abstract = "Open Information Extraction (OpenIE), the problem of harvesting triples from natural language text whose predicate relations are not aligned to any pre-defined ontology, has been a popular subject of research for the last decade. However, this research has largely ignored the vast quantity of facts available in semi-structured webpages. In this paper, we define the problem of OpenIE from semi-structured websites to extract such facts, and present an approach for solving it. We also introduce a labeled evaluation dataset to motivate research in this area. Given a semi-structured website and a set of seed facts for some relations existing on its pages, we employ a semi-supervised label propagation technique to automatically create training data for the relations present on the site. We then use this training data to learn a classifier for relation extraction. Experimental results of this method on our new benchmark dataset obtained a precision of over 70{\%}. A larger scale extraction experiment on 31 websites in the movie vertical resulted in the extraction of over 2 million triples.",
}
```
### Groups and Tasks
#### Tasks
* `swde`: the SWDE task as implemented in the paper "Simple linear attention language models balance the recall-throughput tradeoff". Designed for zero-shot evaluation of small LMs.
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
task: swde
class: !function task.SWDE
import re
from typing import List

import numpy as np

from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask


class SWDE(ConfigurableTask):
    VERSION = 0
    DATASET_PATH = "hazyresearch/based-swde-v2"
    DATASET_NAME = "default"

    def __init__(self):
        super().__init__(config={"metadata": {"version": self.VERSION}})

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def validation_docs(self):
        return self.dataset["validation"]

    def doc_to_text(self, doc):
        return doc["text"]

    def doc_to_target(self, doc):
        return doc["value"]

    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        return [
            Instance(
                request_type="generate_until",
                doc=doc,
                arguments=(ctx, {"until": ["\n"], "max_gen_toks": 48}),
                idx=0,
                **kwargs,
            )
        ]

    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        # continuation, (logprob_unanswerable, _) = results
        continuation = results
        return {"contains": contains_score(continuation[0], [doc["value"]])}

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        return {
            "contains": np.mean,  # whether the generation contains the gold answer
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        return {
            "contains": True,  # whether the generation contains the gold answer
        }


def contains_score(prediction: str, labels: List[str]):
    return max(
        int(bool(re.search(re.compile(re.escape(label), re.IGNORECASE), prediction)))
        for label in labels
    )
 import collections
 import fnmatch
 import functools
+import hashlib
 import importlib.util
 import inspect
+import json
 import logging
 import os
 import re
+from dataclasses import asdict, is_dataclass
 from itertools import islice
 from typing import Any, Callable, List
@@ -24,6 +27,10 @@ eval_logger = logging.getLogger("lm-eval")
 SPACING = " " * 47


+def hash_string(string: str) -> str:
+    return hashlib.sha256(string.encode("utf-8")).hexdigest()
+
+
 def escaped_split(text, sep_char, maxsplit=-1):
     """Split text into a list on occurrences of the given separation
     character `sep_char`. The separation character may be escaped by a
@@ -60,6 +67,15 @@ def handle_arg_string(arg):
     return arg


+def handle_non_serializable(o):
+    if isinstance(o, np.int64) or isinstance(o, np.int32):
+        return int(o)
+    elif isinstance(o, set):
+        return list(o)
+    else:
+        return str(o)
+
+
 def simple_parse_args_string(args_string):
     """
     Parses something like
@@ -166,6 +182,18 @@ def make_disjoint_window(pair):
     return a[: len(a) - (len(b) - 1)], b


+class EnhancedJSONEncoder(json.JSONEncoder):
+    """
+    Provides a proper json encoding for the loggers and trackers json dumps.
+    Notably manages the json encoding of dataclasses.
+    """
+
+    def default(self, o):
+        if is_dataclass(o):
+            return asdict(o)
+        return super().default(o)
+
+
 class Reorderer:
     def __init__(self, arr: List[Any], fn: Callable) -> None:
         """Reorder an array according to some function
@@ -214,7 +242,7 @@ class Reorderer:
         return res


-def make_table(result_dict, column: str = "results"):
+def make_table(result_dict, column: str = "results", sort_results: bool = True):
     """Generate table of results."""
     from pytablewriter import LatexTableWriter, MarkdownTableWriter
@@ -241,12 +269,14 @@ def make_table(result_dict, column: str = "results"):
     values = []

-    for k, dic in result_dict[column].items():
+    keys = result_dict[column].keys()
+    if sort_results:
+        # sort entries alphabetically
+        keys = sorted(keys)
+    for k in keys:
+        dic = result_dict[column][k]
         version = result_dict["versions"].get(k, " N/A")
-        if k in result_dict["n-shot"]:
-            n = str(result_dict["n-shot"][k])
-        else:
-            n = " "
+        n = str(result_dict["n-shot"][k])

         if "alias" in dic:
             k = dic.pop("alias")
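The two JSON helpers introduced above are typically used when dumping results: dataclasses go through `EnhancedJSONEncoder`, while numpy scalars and sets can be handled via the `default=` hook. A small sketch (the dataclass is made up for illustration, and it assumes these helpers land in `lm_eval.utils` as this diff shows):

```python
import json
from dataclasses import dataclass

import numpy as np

from lm_eval.utils import EnhancedJSONEncoder, handle_non_serializable


@dataclass
class RunInfo:  # hypothetical dataclass, only for illustration
    task: str
    limit: int


# dataclasses are serialized by the encoder's default(); numpy scalars and
# sets fall through to handle_non_serializable via the `default=` hook
print(json.dumps(RunInfo(task="sciq", limit=10), cls=EnhancedJSONEncoder))
print(json.dumps({"n": np.int64(3), "tags": {"a", "b"}}, default=handle_non_serializable))
```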
@@ -23,6 +23,7 @@ class Test_HFLM:
     MULTIPLE_CH: list[Instance] = multiple_choice_task.instances
     generate_until_task = task_list["gsm8k"]  # type: ignore
     generate_until_task._config.generation_kwargs["max_gen_toks"] = 10
+    generate_until_task.set_fewshot_seed(1234)  # fewshot random generator seed
     generate_until_task.build_all_requests(limit=10, rank=0, world_size=1)
     generate_until: list[Instance] = generate_until_task.instances
     rolling_task = task_list["wikitext"]  # type: ignore
 import random
 import tempfile
+from pathlib import Path

 import pytest
 from optimum.intel import OVModelForCausalLM
@@ -71,3 +72,21 @@ def test_evaluator(model_id, task):
         limit=limit,
         bootstrap_iters=10,
     )
+
+
+def test_ov_config():
+    """Test that if specified, a custom OpenVINO config is loaded correctly"""
+    model_id = "hf-internal-testing/tiny-random-gpt2"
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        config_file = str(Path(tmpdirname) / "ov_config.json")
+        with open(Path(config_file), "w") as f:
+            f.write('{"DYNAMIC_QUANTIZATION_GROUP_SIZE" : "32"}')
+        lm = get_model("openvino").create_from_arg_string(
+            f"pretrained={model_id},ov_config={config_file}"
+        )
+        assert (
+            lm.model.request.get_compiled_model().get_property(
+                "DYNAMIC_QUANTIZATION_GROUP_SIZE"
+            )
+            == 32
+        )
@@ -21,12 +21,18 @@ from lm_eval import tasks
             10,
             "hf",
             "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
-        )
+        ),
+        (
+            ["mmlu_abstract_algebra"],
+            None,
+            "hf",
+            "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
+        ),
     ],
 )
 def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
-    task_name = task_name
-    limit = 10
+    # task_name = task_name
+    # limit = 10

     e1 = evaluator.simple_evaluate(
         model=model,
@@ -57,7 +63,10 @@ def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
     # check that caching is working
     def r(x):
-        return x["results"]["arc_easy"]
+        if "arc_easy" in x["results"]:
+            return x["results"]["arc_easy"]
+        else:
+            return x["results"]["mmlu_abstract_algebra"]

     assert all(
         x == y
@@ -20,8 +20,8 @@ sys.path.append(f"{MODULE_DIR}/../scripts")
 model_loader = importlib.import_module("requests_caching")
 run_model_for_task_caching = model_loader.run_model_for_task_caching

+os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = "1"

-DEFAULT_TASKS = ["lambada_openai", "hellaswag"]
+DEFAULT_TASKS = ["lambada_openai", "sciq"]


 @pytest.fixture(autouse=True)
@@ -64,16 +64,16 @@ def assert_created(tasks: List[str], file_task_names: List[str]):
 @pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
-def test_requests_caching_true(tasks: List[str]):
+def requests_caching_true(tasks: List[str]):
     run_model_for_task_caching(tasks=tasks, cache_requests="true")

     cache_files, file_task_names = get_cache_files()
+    print(file_task_names)

     assert_created(tasks=tasks, file_task_names=file_task_names)


 @pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
-def test_requests_caching_refresh(tasks: List[str]):
+def requests_caching_refresh(tasks: List[str]):
     run_model_for_task_caching(tasks=tasks, cache_requests="true")

     timestamp_before_test = datetime.now().timestamp()
@@ -93,9 +93,9 @@ def test_requests_caching_refresh(tasks: List[str]):
 @pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
-def test_requests_caching_delete(tasks: List[str]):
+def requests_caching_delete(tasks: List[str]):
     # populate the data first, rerun this test within this test for additional confidence
-    test_requests_caching_true(tasks=tasks)
+    # test_requests_caching_true(tasks=tasks)

     run_model_for_task_caching(tasks=tasks, cache_requests="delete")
@@ -109,9 +109,9 @@ if __name__ == "__main__":
     def run_tests():
         tests = [
-            test_requests_caching_true,
-            test_requests_caching_refresh,
-            test_requests_caching_delete,
+            # test_requests_caching_true,
+            # test_requests_caching_refresh,
+            # test_requests_caching_delete,
         ]

         for test_func in tests: