Commit 7d6ec4d9 authored by Baber

Merge branch 'main' into metrics

# Conflicts:
#	lm_eval/__init__.py
#	pyproject.toml
parents 1020c46e d021bf84
__version__ = "0.4.9"
import logging
import os
__version__ = "0.4.9.1"
# Lazy-load .evaluator module to improve CLI startup
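For context, the comment above refers to deferring the import of `lm_eval.evaluator` until it is first accessed. One common way to do this is a module-level `__getattr__` (PEP 562); the sketch below is illustrative only, since the actual implementation is not part of this hunk.

```python
# Illustrative sketch (PEP 562 lazy import); belongs in the package's __init__.py.
# Not necessarily how lm_eval implements it; the hunk above only shows the comment.
import importlib


def __getattr__(name):
    if name == "evaluator":
        # Imported only on first attribute access, keeping `import lm_eval` cheap.
        return importlib.import_module(".evaluator", __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```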
@@ -2,7 +2,6 @@ from __future__ import annotations
import copy
import gc
import inspect
import logging
import os
from importlib.metadata import version
@@ -35,7 +34,7 @@ from lm_eval.utils import (
try:
import ray
from vllm import LLM, SamplingParams
from vllm import LLM, SamplingParams, TokensPrompt
from vllm.lora.request import LoRARequest
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import get_open_port
@@ -81,7 +80,7 @@ def _vllm_mp_worker(
try:
llm = LLM(**model_args)
res = llm.generate(
prompt_token_ids=requests,
[TokensPrompt(prompt_token_ids=request) for request in requests],
sampling_params=sampling_params,
lora_request=lora_request,
)
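The hunks in this file replace the deprecated `prompt_token_ids=` keyword of `LLM.generate` with `TokensPrompt` objects, matching the new import above. A minimal sketch of the new calling convention; the model name and token ids below are placeholders:

```python
from vllm import LLM, SamplingParams, TokensPrompt

llm = LLM(model="facebook/opt-125m")  # placeholder model
sampling_params = SamplingParams(temperature=0.0, max_tokens=16)

# Pre-tokenized requests: one list of token ids per prompt (ids are made up).
requests = [[2, 100, 250, 14], [2, 7, 300]]

# Old style: llm.generate(prompt_token_ids=requests, sampling_params=...)
# New style: wrap each token-id list in a TokensPrompt.
outputs = llm.generate(
    [TokensPrompt(prompt_token_ids=request) for request in requests],
    sampling_params=sampling_params,
)
for out in outputs:
    print(out.outputs[0].text)
```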
@@ -241,13 +240,6 @@ class VLLM(TemplateLM):
model_config = engine_args.create_model_config()
kwargs_resolve_hf_chat_template["model_config"] = model_config
# https://github.com/vllm-project/vllm/pull/18259
if (
"trsut_remote_code"
in inspect.signature(resolve_hf_chat_template).parameters
):
kwargs_resolve_hf_chat_template["trsut_remote_code"] = trust_remote_code
else:
kwargs_resolve_hf_chat_template["trust_remote_code"] = trust_remote_code
@@ -397,7 +389,7 @@ class VLLM(TemplateLM):
):
llm = LLM(**model_args)
return llm.generate(
prompt_token_ids=requests,
[TokensPrompt(prompt_token_ids=request) for request in requests],
sampling_params=sampling_params,
lora_request=lora_request,
)
@@ -486,7 +478,7 @@ class VLLM(TemplateLM):
else:
outputs = self.model.generate(
prompt_token_ids=requests,
[TokensPrompt(prompt_token_ids=request) for request in requests],
sampling_params=sampling_params,
use_tqdm=self.batch_size == "auto",
lora_request=self.lora_request,
@@ -81,7 +81,7 @@ class TaskManager:
task_index = {}
for task_dir in all_paths:
tasks = self._get_task_and_group(task_dir)
task_index = {**tasks, **task_index}
task_index = {**task_index, **tasks}
return task_index
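The only change in this hunk is the unpacking order, which matters because later mappings win on key collisions in Python dict literals: `{**task_index, **tasks}` lets the most recently scanned task directory override earlier entries, the reverse of the old behavior. A quick illustration (the paths are made up):

```python
task_index = {"humaneval": "builtin/humaneval.yaml"}  # already indexed
tasks = {"humaneval": "user_dir/humaneval.yaml"}      # newly scanned directory

print({**tasks, **task_index})  # {'humaneval': 'builtin/humaneval.yaml'}   (old order)
print({**task_index, **tasks})  # {'humaneval': 'user_dir/humaneval.yaml'}  (new order)
```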
@@ -52,3 +52,5 @@ If other tasks on this dataset are already supported:
v2 20-MAR-2025: `humaneval_instruct`, `humaneval_instruct_64`: fixed typo in gen_prefix
v3 30-JUN-2025: Updated prompt generation and output parsing to align with the official `Llama-3.1-70B-Instruct-evals`. This corrects the prompt format and fixes a bug in locating the code block. See PR [#3092](https://github.com/EleutherAI/lm-evaluation-harness/pull/3092).
v4 01-AUG-2025: Synchronized definitions between `humaneval_instruct` and `humaneval_instruct_64`. The former had a trailing space in `gen_prefix`, and the latter's `doc_to_text` was outdated.
include: humaneval_64.yaml
task: humaneval_64_instruct
doc_to_text: "Write a solution to the following problem and make sure that it passes the tests:\n```{{prompt}}"
doc_to_text: "Write a solution to the following problem and make sure that it passes the tests:\n```python\n{{ prompt }}\n```\n"
gen_prefix: "Here is the completed function:\n```python\n{{prompt}}\n"
filter_list:
- name: "create_test"
@@ -8,4 +8,4 @@ filter_list:
- function: "custom"
filter_fn: !function utils.build_predictions_instruct
metadata:
version: 2.0
version: 3.0
include: humaneval.yaml
task: humaneval_instruct
doc_to_text: "Write a solution to the following problem and make sure that it passes the tests:\n```python\n{{ prompt }}\n```\n "
gen_prefix: "Here is the completed function:\n```python\n{{ prompt }}\n "
doc_to_text: "Write a solution to the following problem and make sure that it passes the tests:\n```python\n{{ prompt }}\n```\n"
gen_prefix: "Here is the completed function:\n```python\n{{ prompt }}\n"
filter_list:
- name: "create_test"
filter:
- function: "custom"
filter_fn: !function utils.build_predictions_instruct
metadata:
version: 3.0
version: 4.0
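Both `doc_to_text` and `gen_prefix` are Jinja2 templates over the dataset's `prompt` column, so the whitespace edits above change the exact strings the model sees; v4 mainly drops a stray trailing space. A small way to inspect the effect (the template here is simplified, omitting the markdown code fence from the real config, and the harness's exact Jinja environment settings may differ):

```python
from jinja2 import Environment

env = Environment(keep_trailing_newline=True)  # keep the final newline when rendering

old_gen_prefix = "Here is the completed function:\n{{ prompt }}\n "  # trailing space (v3)
new_gen_prefix = "Here is the completed function:\n{{ prompt }}\n"   # v4

doc = {"prompt": "def add(a, b):\n    return a + b\n"}

print(repr(env.from_string(old_gen_prefix).render(**doc)))
print(repr(env.from_string(new_gen_prefix).render(**doc)))
```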
@@ -36,56 +36,56 @@ Homepage: `https://github.com/facebookresearch/MLQA`
#### Tasks
Tasks of the form `mlqa_context-lang_question-lang.yaml`
* `mlqa_ar_ar.yaml`
* `mlqa_ar_de.yaml`
* `mlqa_ar_vi.yaml`
* `mlqa_ar_zh.yaml`
* `mlqa_ar_en.yaml`
* `mlqa_ar_es.yaml`
* `mlqa_ar_hi.yaml`
* `mlqa_de_ar.yaml`
* `mlqa_de_de.yaml`
* `mlqa_de_vi.yaml`
* `mlqa_de_zh.yaml`
* `mlqa_de_en.yaml`
* `mlqa_de_es.yaml`
* `mlqa_de_hi.yaml`
* `mlqa_vi_ar.yaml`
* `mlqa_vi_de.yaml`
* `mlqa_vi_vi.yaml`
* `mlqa_vi_zh.yaml`
* `mlqa_vi_en.yaml`
* `mlqa_vi_es.yaml`
* `mlqa_vi_hi.yaml`
* `mlqa_zh_ar.yaml`
* `mlqa_zh_de.yaml`
* `mlqa_zh_vi.yaml`
* `mlqa_zh_zh.yaml`
* `mlqa_zh_en.yaml`
* `mlqa_zh_es.yaml`
* `mlqa_zh_hi.yaml`
* `mlqa_en_ar.yaml`
* `mlqa_en_de.yaml`
* `mlqa_en_vi.yaml`
* `mlqa_en_zh.yaml`
* `mlqa_en_en.yaml`
* `mlqa_en_es.yaml`
* `mlqa_en_hi.yaml`
* `mlqa_es_ar.yaml`
* `mlqa_es_de.yaml`
* `mlqa_es_vi.yaml`
* `mlqa_es_zh.yaml`
* `mlqa_es_en.yaml`
* `mlqa_es_es.yaml`
* `mlqa_es_hi.yaml`
* `mlqa_hi_ar.yaml`
* `mlqa_hi_de.yaml`
* `mlqa_hi_vi.yaml`
* `mlqa_hi_zh.yaml`
* `mlqa_hi_en.yaml`
* `mlqa_hi_es.yaml`
* `mlqa_hi_hi.yaml`
Tasks of the form `mlqa_context-lang_question-lang`
* `mlqa_ar_ar`
* `mlqa_ar_de`
* `mlqa_ar_vi`
* `mlqa_ar_zh`
* `mlqa_ar_en`
* `mlqa_ar_es`
* `mlqa_ar_hi`
* `mlqa_de_ar`
* `mlqa_de_de`
* `mlqa_de_vi`
* `mlqa_de_zh`
* `mlqa_de_en`
* `mlqa_de_es`
* `mlqa_de_hi`
* `mlqa_vi_ar`
* `mlqa_vi_de`
* `mlqa_vi_vi`
* `mlqa_vi_zh`
* `mlqa_vi_en`
* `mlqa_vi_es`
* `mlqa_vi_hi`
* `mlqa_zh_ar`
* `mlqa_zh_de`
* `mlqa_zh_vi`
* `mlqa_zh_zh`
* `mlqa_zh_en`
* `mlqa_zh_es`
* `mlqa_zh_hi`
* `mlqa_en_ar`
* `mlqa_en_de`
* `mlqa_en_vi`
* `mlqa_en_zh`
* `mlqa_en_en`
* `mlqa_en_es`
* `mlqa_en_hi`
* `mlqa_es_ar`
* `mlqa_es_de`
* `mlqa_es_vi`
* `mlqa_es_zh`
* `mlqa_es_en`
* `mlqa_es_es`
* `mlqa_es_hi`
* `mlqa_hi_ar`
* `mlqa_hi_de`
* `mlqa_hi_vi`
* `mlqa_hi_zh`
* `mlqa_hi_en`
* `mlqa_hi_es`
* `mlqa_hi_hi`
### Checklist
@@ -71,3 +71,6 @@ switch to original implementation
ver 2: PR #2116
add missing newline in description.
PR #3137
Fix `mmlu_continuation` subgroup names to match the other MMLU variants, and switch the dataset from `hails/mmlu_no_train` to `cais/mmlu` in all subtasks.
dataset_path: hails/mmlu_no_train # a copy of `cais/mmlu` with no auxiliary_train split
dataset_path: cais/mmlu
output_type: multiple_choice
test_split: test
fewshot_split: dev
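With the switch to `cais/mmlu`, each subject is loaded as its own configuration with the `test` and `dev` splits referenced above. A quick sanity check using the `datasets` library (subject name chosen arbitrarily):

```python
from datasets import load_dataset

ds = load_dataset("cais/mmlu", "abstract_algebra")

# The continuation variant evaluates on `test` and draws few-shot examples from `dev`.
print(ds["test"].num_rows, ds["dev"].num_rows)
print(ds["test"][0]["question"], ds["test"][0]["choices"], ds["test"][0]["answer"])
```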
@@ -3,25 +3,25 @@ group_alias: mmlu (continuation)
task:
- group: stem
task:
- mmlu_continuation_stem
- mmlu_stem_continuation
aggregate_metric_list:
- metric: acc
weight_by_size: True
- group: other
task:
- mmlu_continuation_other
- mmlu_other_continuation
aggregate_metric_list:
- metric: acc
weight_by_size: True
- group: social sciences
task:
- mmlu_continuation_social_sciences
- mmlu_social_sciences_continuation
aggregate_metric_list:
- metric: acc
weight_by_size: True
- group: humanities
task:
- mmlu_continuation_humanities
- mmlu_humanities_continuation
aggregate_metric_list:
- metric: acc
weight_by_size: True
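With the subgroup names aligned to the other MMLU variants, the renamed groups and tasks are invoked the same way as before. A sketch using the harness's Python entry point (model and arguments are placeholders):

```python
import lm_eval

# Placeholder model; any model type / model_args supported by the harness works here.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["mmlu_stem_continuation"],  # renamed subgroup; "mmlu_continuation" runs the full group
    batch_size=8,
)
print(results["results"])
```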
"dataset_name": "abstract_algebra"
"description": "The following are questions (with answers) about abstract\
\ algebra.\n\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_abstract_algebra"
"task": "mmlu_abstract_algebra_continuation"
"dataset_name": "anatomy"
"description": "The following are questions (with answers) about anatomy.\n\
\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_anatomy"
"task": "mmlu_anatomy_continuation"
"dataset_name": "astronomy"
"description": "The following are questions (with answers) about astronomy.\n\
\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_astronomy"
"task": "mmlu_astronomy_continuation"
"dataset_name": "business_ethics"
"description": "The following are questions (with answers) about business\
\ ethics.\n\n"
"tag": "mmlu_continuation_other"
"tag": "mmlu_other_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_business_ethics"
"task": "mmlu_business_ethics_continuation"
"dataset_name": "clinical_knowledge"
"description": "The following are questions (with answers) about clinical\
\ knowledge.\n\n"
"tag": "mmlu_continuation_other"
"tag": "mmlu_other_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_clinical_knowledge"
"task": "mmlu_clinical_knowledge_continuation"
"dataset_name": "college_biology"
"description": "The following are questions (with answers) about college\
\ biology.\n\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_college_biology"
"task": "mmlu_college_biology_continuation"
"dataset_name": "college_chemistry"
"description": "The following are questions (with answers) about college\
\ chemistry.\n\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_college_chemistry"
"task": "mmlu_college_chemistry_continuation"
"dataset_name": "college_computer_science"
"description": "The following are questions (with answers) about college\
\ computer science.\n\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_college_computer_science"
"task": "mmlu_college_computer_science_continuation"
"dataset_name": "college_mathematics"
"description": "The following are questions (with answers) about college\
\ mathematics.\n\n"
"tag": "mmlu_continuation_stem"
"tag": "mmlu_stem_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_college_mathematics"
"task": "mmlu_college_mathematics_continuation"
"dataset_name": "college_medicine"
"description": "The following are questions (with answers) about college\
\ medicine.\n\n"
"tag": "mmlu_continuation_other"
"tag": "mmlu_other_continuation"
"include": "_continuation_template_yaml"
"task": "mmlu_continuation_college_medicine"
"task": "mmlu_college_medicine_continuation"