"test/vscode:/vscode.git/clone" did not exist on "d9a4e92e97ea549226b0b68076b0a5712ff6b54f"
Commit 56abc3a1 authored by lintangsutawika

Merge branch 'main' of https://github.com/EleutherAI/lm-evaluation-harness into alt_worlds

parents 1b7d57cf aa61f940
@@ -29,7 +29,7 @@ jobs:
cache: pip
cache-dependency-path: setup.py
- name: Install dependencies
-run: pip install -e '.[linting,testing]' --extra-index-url https://download.pytorch.org/whl/cpu
+run: pip install -e '.[linting,testing]' --extra-index-url https://download.pytorch.org/whl/cpu ; export SKIP=no-commit-to-branch # env var deactivates --no-commit-to-branch
- name: Pre-Commit
uses: pre-commit/action@v3.0.0
- name: Lint with pylint
......
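For reference, pre-commit's `SKIP` environment variable takes a comma-separated list of hook ids to bypass, which is what the amended step above relies on. A minimal local equivalent might look like this (a sketch; it assumes the repository's `.pre-commit-config.yaml` defines the `no-commit-to-branch` hook):

```bash
# Install the linting/testing extras, then run all hooks while skipping no-commit-to-branch
pip install -e '.[linting,testing]' --extra-index-url https://download.pytorch.org/whl/cpu
SKIP=no-commit-to-branch pre-commit run --all-files
```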
@@ -85,7 +85,7 @@ lm_eval --model hf \
--batch_size 8
```
-Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Huggingface are supporteded.
+Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Huggingface are supported.
Batch size selection can be automated by setting the ```--batch_size``` flag to ```auto```. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append ```:N``` to above flag to automatically recompute the largest batch size ```N``` times. For example, to recompute the batch size 4 times, the command would be:
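A sketch of that command, following the `hf` example above (the `--model_args` value is illustrative; substitute your own model):

```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/pythia-160m \
    --tasks lambada_openai,hellaswag \
    --device cuda:0 \
    --batch_size auto:4
```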
@@ -149,7 +149,7 @@ Our library also supports the evaluation of models served via several commercial
To call a hosted model, use:
```bash
-export OPENAI_API_SECRET_KEY=YOUR_KEY_HERE
+export OPENAI_API_KEY=YOUR_KEY_HERE
lm_eval --model openai-completions \
--model_args engine=davinci \
--tasks lambada_openai,hellaswag
......
import os
import time
-from typing import List, Tuple
+from typing import List, Tuple, Optional
import copy
from collections import defaultdict
@@ -11,7 +11,7 @@ from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
-def get_result(response: dict, ctxlen: int) -> Tuple[float, bool]:
+def get_result(response, ctxlen: int) -> Tuple[float, bool]:
"""Process results from OpenAI API response.
:param response: dict
@@ -25,12 +25,12 @@ def get_result(response: dict, ctxlen: int) -> Tuple[float, bool]:
whether argmax matches given continuation exactly
"""
is_greedy = True
logprobs = response["logprobs"]["token_logprobs"]
logprobs = response.logprobs.token_logprobs
continuation_logprobs = sum(logprobs[ctxlen:])
for i in range(ctxlen, len(response["logprobs"]["tokens"])):
token = response["logprobs"]["tokens"][i]
top_tokens = response["logprobs"]["top_logprobs"][i]
for i in range(ctxlen, len(response.logprobs.token_logprobs)):
token = response.logprobs.token_logprobs[i]
top_tokens = response.logprobs.top_logprobs[i]
top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
if top_token != token:
is_greedy = False
@@ -55,8 +55,8 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open
backoff_time = 3
while True:
try:
-return openai.Completions.create(**kwargs)
-except openai.error.OpenAIError:
+return openai.completions.create(**kwargs)
+except openai.OpenAIError:
import traceback
traceback.print_exc()
@@ -64,15 +64,19 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open
backoff_time *= 1.5
@register_model("gooseai")
@register_model("openai-completions")
class OpenaiCompletionsLM(LM):
REQ_CHUNK_SIZE = 20
_DEFAULT_MAX_LENGTH = 2048
def __init__(
self,
engine: str = "text-davinci-003",
model: str = "text-davinci-003",
truncate: bool = False,
max_gen_toks: int = 256,
batch_size: int = 1,
seed: int = 1234,
max_length: Optional[int] = None,
) -> None:
"""
@@ -82,6 +86,7 @@ class OpenaiCompletionsLM(LM):
Truncate input if too long (if False and input is too long, throw error)
"""
super().__init__()
self.seed = seed
try:
import openai, tiktoken # noqa: E401
except ModuleNotFoundError:
@@ -89,14 +94,16 @@ class OpenaiCompletionsLM(LM):
"attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`",
)
-self.engine = engine
-self.tokenizer = tiktoken.encoding_for_model(self.engine)
+self.model = model
+self.tokenizer = tiktoken.encoding_for_model(self.model)
self.vocab_size = self.tokenizer.n_vocab
self.truncate = truncate
self.end_of_text_token_id = self.tokenizer.eot_token
self._max_gen_toks = max_gen_toks
self._max_length = max_length
# Read from environment variable OPENAI_API_SECRET_KEY
openai.api_key = os.environ["OPENAI_API_SECRET_KEY"]
openai.api_key = os.environ["OPENAI_API_KEY"]
@property
def eot_token_id(self):
@@ -104,12 +111,14 @@ class OpenaiCompletionsLM(LM):
@property
def max_length(self) -> int:
# Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token
-return 2048
+if self._max_length:
+return self._max_length
+else:
+return self._DEFAULT_MAX_LENGTH
@property
def max_gen_toks(self) -> int:
-return 256
+return self._max_gen_toks
@property
def batch_size(self):
@@ -187,12 +196,13 @@ class OpenaiCompletionsLM(LM):
ctxlens.append(ctxlen)
response = oa_completion(
-engine=self.engine,
+model=self.model,
prompt=inps,
echo=True,
max_tokens=0,
temperature=0.0,
logprobs=10,
seed=self.seed,
)
for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(
@@ -242,21 +252,22 @@ class OpenaiCompletionsLM(LM):
inp = context_enc[-(self.max_length - self.max_gen_toks) :]
inps.append(inp)
-until = request_args.get("until", ["<|endoftext|>"])
+until = request_args.pop("until", ["<|endoftext|>"])
request_args.pop("do_sample", None)
request_args["temperature"] = request_args.get("temperature", 0)
response = oa_completion(
-engine=self.engine,
+model=self.model,
prompt=inps,
max_tokens=self.max_gen_toks,
temperature=0.0,
logprobs=10,
stop=until,
seed=self.seed,
**request_args,
)
for resp, (context, args_) in zip(response.choices, chunk):
s = resp["text"]
s = getattr(resp, "text")
-until_ = args_.get("until", ["<|endoftext|>"])
+until_ = until
for term in until_:
if len(term) > 0:
......
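The loop cut off above implements the usual stop-sequence handling: the generated text is truncated at the first occurrence of any non-empty stop string. A generic, self-contained sketch of that idiom (not the harness's exact code):

```python
from typing import List


def truncate_at_stop_sequences(text: str, stop_sequences: List[str]) -> str:
    """Keep only the portion of the generation before the first stop sequence."""
    for term in stop_sequences:
        if len(term) > 0:  # ignore empty stop strings
            text = text.split(term)[0]
    return text


print(truncate_at_stop_sequences("Paris.<|endoftext|>Ignored tail", ["<|endoftext|>"]))  # -> "Paris."
```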
@@ -139,7 +139,6 @@ please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
generate: bool = False,
max_tokens: int = None,
stop: Optional[List[str]] = None,
use_tqdm=True,
**kwargs,
):
if "do_sample" in kwargs.keys():
@@ -169,7 +168,7 @@ please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
outputs = self.model.generate(
prompt_token_ids=requests,
sampling_params=sampling_params,
-use_tqdm=use_tqdm,
+use_tqdm=True if self.batch_size == "auto" else False,
)
return outputs
......
# IFEval
### Paper
Title: Instruction-Following Evaluation for Large Language Models
Abstract: https://arxiv.org/abs/2311.07911
One core capability of Large Language Models (LLMs) is to follow natural language instructions. However, the evaluation of such abilities is not standardized: Human evaluations are expensive, slow, and not objectively reproducible, while LLM-based auto-evaluation is potentially biased or limited by the ability of the evaluator LLM. To overcome these issues, we introduce Instruction-Following Eval (IFEval) for large language models. IFEval is a straightforward and easy-to-reproduce evaluation benchmark. It focuses on a set of "verifiable instructions" such as "write in more than 400 words" and "mention the keyword of AI at least 3 times". We identified 25 types of those verifiable instructions and constructed around 500 prompts, with each prompt containing one or more verifiable instructions. We show evaluation results of two widely available LLMs on the market. Our code and data can be found at https://github.com/google-research/google-research/tree/master/instruction_following_eval
Homepage: https://github.com/google-research/google-research/tree/master/instruction_following_eval
### Citation
```
@article{zhou2023instructionfollowing,
title={Instruction-Following Evaluation for Large Language Models},
author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou},
journal={arXiv preprint arXiv:2311.07911},
year={2023},
}
```
### Groups and Tasks
#### Groups
* Not part of a group yet
#### Tasks
* `ifeval`
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
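### Example usage

A minimal invocation sketch (the model name and batch size below are placeholders; the `ifeval` extra from `pyproject.toml` supplies `langdetect` and `immutabledict`):

```bash
# Assumes: pip install -e ".[ifeval]"
lm_eval --model hf \
    --model_args pretrained=<your-chat-tuned-model> \
    --tasks ifeval \
    --batch_size 8
```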
task: ifeval
dataset_path: wis-k/instruction-following-eval
dataset_name: null
output_type: generate_until
test_split: train
num_fewshot: 0
doc_to_text: prompt
doc_to_target: 0
generation_kwargs:
until: []
do_sample: false
temperature: 0.0
max_gen_toks: 1280
process_results: !function utils.process_results
metric_list:
- metric: prompt_level_strict_acc
aggregation: mean
higher_is_better: true
- metric: inst_level_strict_acc
aggregation: !function utils.agg_inst_level_acc
higher_is_better: true
- metric: prompt_level_loose_acc
aggregation: mean
higher_is_better: true
- metric: inst_level_loose_acc
aggregation: !function utils.agg_inst_level_acc
higher_is_better: true
metadata:
- version: 1.0
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry of all instructions."""
from lm_eval.tasks.ifeval import instructions
_KEYWORD = "keywords:"
_LANGUAGE = "language:"
_LENGTH = "length_constraints:"
_CONTENT = "detectable_content:"
_FORMAT = "detectable_format:"
_MULTITURN = "multi-turn:"
_COMBINATION = "combination:"
_STARTEND = "startend:"
_CHANGE_CASES = "change_case:"
_PUNCTUATION = "punctuation:"
INSTRUCTION_DICT = {
_KEYWORD + "existence": instructions.KeywordChecker,
_KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
# TODO(jeffreyzhou): make a proper set of sentences to choose from
# _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
_KEYWORD + "forbidden_words": instructions.ForbiddenWords,
_KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
_LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
_LENGTH + "number_sentences": instructions.NumberOfSentences,
_LENGTH + "number_paragraphs": instructions.ParagraphChecker,
_LENGTH + "number_words": instructions.NumberOfWords,
_LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
_CONTENT + "number_placeholders": instructions.PlaceholderChecker,
_CONTENT + "postscript": instructions.PostscriptChecker,
_FORMAT + "number_bullet_lists": instructions.BulletListChecker,
# TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
# _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
_FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
_FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
_FORMAT + "multiple_sections": instructions.SectionChecker,
# TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
# _FORMAT + "rephrase": instructions.RephraseChecker,
_FORMAT + "json_format": instructions.JsonFormat,
_FORMAT + "title": instructions.TitleChecker,
# TODO(tianjianlu): Re-enable with specific prompts.
# _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
_COMBINATION + "two_responses": instructions.TwoResponsesChecker,
_COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
_STARTEND + "end_checker": instructions.EndChecker,
_CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
_CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
_CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
_PUNCTUATION + "no_comma": instructions.CommaChecker,
_STARTEND + "quotation": instructions.QuotationChecker,
}
INSTRUCTION_CONFLICTS = {
_KEYWORD + "existence": {_KEYWORD + "existence"},
_KEYWORD + "frequency": {_KEYWORD + "frequency"},
# TODO(jeffreyzhou): make a proper set of sentences to choose from
# _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
_KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
_KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
_LANGUAGE
+ "response_language": {
_LANGUAGE + "response_language",
_FORMAT + "multiple_sections",
_KEYWORD + "existence",
_KEYWORD + "frequency",
_KEYWORD + "forbidden_words",
_STARTEND + "end_checker",
_CHANGE_CASES + "english_capital",
_CHANGE_CASES + "english_lowercase",
},
_LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
_LENGTH
+ "number_paragraphs": {
_LENGTH + "number_paragraphs",
_LENGTH + "nth_paragraph_first_word",
_LENGTH + "number_sentences",
_LENGTH + "nth_paragraph_first_word",
},
_LENGTH + "number_words": {_LENGTH + "number_words"},
_LENGTH
+ "nth_paragraph_first_word": {
_LENGTH + "nth_paragraph_first_word",
_LENGTH + "number_paragraphs",
},
_CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
_CONTENT + "postscript": {_CONTENT + "postscript"},
_FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
# TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
# _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
_FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
_FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
_FORMAT
+ "multiple_sections": {
_FORMAT + "multiple_sections",
_LANGUAGE + "response_language",
_FORMAT + "number_highlighted_sections",
},
# TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
# _FORMAT + "rephrase": instructions.RephraseChecker,
_FORMAT
+ "json_format": set(INSTRUCTION_DICT.keys()).difference(
{_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
),
_FORMAT + "title": {_FORMAT + "title"},
# TODO(tianjianlu): Re-enable with specific prompts.
# _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
_COMBINATION
+ "two_responses": set(INSTRUCTION_DICT.keys()).difference(
{
_KEYWORD + "forbidden_words",
_KEYWORD + "existence",
_LANGUAGE + "response_language",
_FORMAT + "title",
_PUNCTUATION + "no_comma",
}
),
_COMBINATION
+ "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
{_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
),
_STARTEND + "end_checker": {_STARTEND + "end_checker"},
_CHANGE_CASES
+ "capital_word_frequency": {
_CHANGE_CASES + "capital_word_frequency",
_CHANGE_CASES + "english_lowercase",
_CHANGE_CASES + "english_capital",
},
_CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
_CHANGE_CASES
+ "english_lowercase": {
_CHANGE_CASES + "english_lowercase",
_CHANGE_CASES + "english_capital",
},
_PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
_STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
}
def conflict_make(conflicts):
"""Makes sure if A conflicts with B, B will conflict with A.
Args:
conflicts: Dictionary of potential conflicts where key is instruction id
and value is set of instruction ids that it conflicts with.
Returns:
Revised version of the dictionary. All instructions conflict with
themselves. If A conflicts with B, B will conflict with A.
"""
for key in conflicts:
for k in conflicts[key]:
conflicts[k].add(key)
conflicts[key].add(key)
return conflicts
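A small illustration of what `conflict_make` does, using hypothetical instruction ids rather than the real registry keys:

```python
# "a" lists "b" as a conflict, but "b" does not yet list "a".
demo_conflicts = {"a": {"b"}, "b": set()}

demo_conflicts = conflict_make(demo_conflicts)

# Every id now conflicts with itself, and the relation is symmetric.
assert demo_conflicts == {"a": {"a", "b"}, "b": {"a", "b"}}
```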
import dataclasses
from typing import Dict, Optional, Union
from lm_eval.tasks.ifeval import instructions_registry
from lm_eval.utils import eval_logger
@dataclasses.dataclass
class InputExample:
key: int
instruction_id_list: list[str]
prompt: str
kwargs: list[Dict[str, Optional[Union[str, int]]]]
@dataclasses.dataclass
class OutputExample:
instruction_id_list: list[str]
prompt: str
response: str
follow_all_instructions: bool
follow_instruction_list: list[bool]
def test_instruction_following_strict(
inp,
response,
):
"""Tests response to see if instructions are followed."""
instruction_list = inp.instruction_id_list
is_following_list = []
for index, instruction_id in enumerate(instruction_list):
instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
instruction = instruction_cls(instruction_id)
# Remove None values from kwargs to avoid unexpected keyword argument errors in build_description method.
kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
instruction.build_description(**kwargs)
args = instruction.get_instruction_args()
if args and "prompt" in args:
instruction.build_description(prompt=inp.prompt)
if response.strip() and instruction.check_following(response):
is_following_list.append(True)
else:
is_following_list.append(False)
return OutputExample(
instruction_id_list=inp.instruction_id_list,
prompt=inp.prompt,
response=response,
follow_all_instructions=all(is_following_list),
follow_instruction_list=is_following_list,
)
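A hypothetical end-to-end example of the strict check, assuming the `punctuation:no_comma` instruction behaves as its registry entry suggests (no commas anywhere in the response):

```python
demo_input = InputExample(
    key=0,
    instruction_id_list=["punctuation:no_comma"],
    prompt="Describe your morning without using any commas.",
    kwargs=[{}],  # this instruction needs no extra arguments
)

ok = test_instruction_following_strict(demo_input, "I woke up and went outside.")
bad = test_instruction_following_strict(demo_input, "I woke up, then went outside.")

print(ok.follow_all_instructions)   # expected True: no comma in the response
print(bad.follow_all_instructions)  # expected False: the response contains a comma
```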
def test_instruction_following_loose(
inp,
response,
):
"""Tests response for an upper bound for following instructions."""
r = response.split("\n")
response_remove_first = "\n".join(r[1:]).strip()
response_remove_last = "\n".join(r[:-1]).strip()
response_remove_both = "\n".join(r[1:-1]).strip()
revised_response = response.replace("*", "")
revised_response_remove_first = response_remove_first.replace("*", "")
revised_response_remove_last = response_remove_last.replace("*", "")
revised_response_remove_both = response_remove_both.replace("*", "")
all_responses = [
response,
revised_response,
response_remove_first,
response_remove_last,
response_remove_both,
revised_response_remove_first,
revised_response_remove_last,
revised_response_remove_both,
]
instruction_list = inp.instruction_id_list
is_following_list = []
for index, instruction_id in enumerate(instruction_list):
instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
instruction = instruction_cls(instruction_id)
# Remove None values from kwargs to avoid unexpected keyword argument errors in build_description method.
kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
instruction.build_description(**kwargs)
args = instruction.get_instruction_args()
if args and "prompt" in args:
instruction.build_description(prompt=inp.prompt)
is_following = False
for r in all_responses:
if r.strip() and instruction.check_following(r):
is_following = True
break
is_following_list.append(is_following)
return OutputExample(
instruction_id_list=inp.instruction_id_list,
prompt=inp.prompt,
response=response,
follow_all_instructions=all(is_following_list),
follow_instruction_list=is_following_list,
)
def process_results(doc, results):
eval_logger.warning(
"This task is meant for chat-finetuned models, and may not give meaningful results for models other than `openai` or `anthropic` if `doc_to_text` in its YAML is not wrapped in the appropriate chat template string. This warning will be removed when chat templating support is added natively to local models"
)
inp = InputExample(
key=doc["key"],
instruction_id_list=doc["instruction_id_list"],
prompt=doc["prompt"],
kwargs=doc["kwargs"],
)
response = results[0]
out_strict = test_instruction_following_strict(inp, response)
out_loose = test_instruction_following_loose(inp, response)
return {
"prompt_level_strict_acc": out_strict.follow_all_instructions,
"inst_level_strict_acc": out_strict.follow_instruction_list,
"prompt_level_loose_acc": out_loose.follow_all_instructions,
"inst_level_loose_acc": out_loose.follow_instruction_list,
}
def agg_inst_level_acc(items):
flat_items = [item for sublist in items for item in sublist]
inst_level_acc = sum(flat_items) / len(flat_items)
return inst_level_acc
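For illustration, the instruction-level aggregation flattens the per-prompt boolean lists emitted by `process_results` before averaging (the values below are made up):

```python
# Prompt 1 followed 2 of its 3 instructions; prompt 2 followed its single instruction.
per_prompt = [[True, False, True], [True]]

print(agg_inst_level_acc(per_prompt))  # 3 correct out of 4 instructions -> 0.75
```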
@@ -70,8 +70,9 @@ promptsource = [
]
gptq = ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"]
anthropic = ["anthropic"]
openai = ["openai>=1.3.5", "tiktoken"]
openai = ["openai==1.3.9", "tiktoken"]
vllm = ["vllm"]
ifeval = ["langdetect", "immutabledict"]
all = [
"lm_eval[dev]",
"lm_eval[testing]",
@@ -83,4 +84,5 @@ all = [
"lm_eval[anthropic]",
"lm_eval[openai]",
"lm_eval[vllm]",
"lm_eval[ifeval]",
]
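With these extras declared, the optional backends install per group; for example, from the repository root:

```bash
# Editable install with the OpenAI and IFEval extras added above
pip install -e ".[openai,ifeval]"

# Or pull in everything via the aggregate extra
pip install -e ".[all]"
```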
@@ -2,6 +2,7 @@ import argparse
import numpy as np
import lm_eval.evaluator
from lm_eval import tasks
from lm_eval import utils
import scipy.stats
from typing import Tuple, Dict, List
import pandas as pd
@@ -9,7 +10,13 @@ import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
-eval_logger = lm_eval.utils.eval_logger
+eval_logger = utils.eval_logger
def memory_stats():
eval_logger.info(
f"Memory allocated: {torch.cuda.memory_allocated() / 1024 ** 2}, reserved: {torch.cuda.memory_reserved() // 1024 ** 2}"
)
def calculate_z_value(res1: Dict, res2: Dict) -> Tuple[float, float]:
@@ -103,7 +110,10 @@ if __name__ == "__main__":
device=args.device,
batch_size=args.batch,
)
-torch.cuda.empty_cache()
+memory_stats()
+utils.clear_torch_cache()
+eval_logger.info("Memory stats cleared")
+memory_stats()
results_hf = lm_eval.evaluator.simple_evaluate(
model="hf",
model_args=f"pretrained={args.pretrained}" + hf_args,
......