Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
lm-evaluation-harness
Commits
43388406
Commit
43388406
authored
Jul 23, 2025
by
Baber
Browse files
sort pyproject
parent
7c853109
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
60 additions
and
59 deletions
+60
-59
pyproject.toml
pyproject.toml
+60
-59
No files found.
pyproject.toml
View file @
43388406
[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "lm_eval"
version = "0.4.9"
authors = [
    { name = "EleutherAI", email = "contact@eleuther.ai" },
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.9"
license = { text = "MIT" }
dependencies = [
    "accelerate>=0.26.0",
    "datasets>=2.16.0",
    "evaluate>=0.4.0",
    "peft>=0.2.0",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "transformers>=4.1",
    "dill",
    "word2number",
    "more_itertools",
]
[tool.setuptools.packages.find]
include = ["lm_eval*"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = [
    "**/*.yaml",
    "tasks/**/*",
]

[project.scripts]
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[dependency-groups]
dev = [
    "api",
    "dev",
    "sentencepiece",
]
[project.optional-dependencies]
acpbench = [
    "lark>=1.1.9",
    "tarski[clingo]==0.8.2",
    "pddl==0.4.2",
    "kstar-planner==1.4.2",
]
...
...
@@ -62,7 +54,7 @@ ibm_watsonx_ai = ["ibm_watsonx_ai>=1.1.22", "python-dotenv"]
ifeval = [
    "langdetect",
    "immutabledict",
    "nltk>=3.9.1",
]
ipex = ["optimum"]
japanese_leaderboard = [
    "emoji==2.14.0",
    "neologdn==0.5.3",
    "fugashi[unidic-lite]",
    "rouge_score>=0.1.2",
]
longbench = [
    "jieba",
    "fuzzywuzzy",
    "rouge",
]
mamba = [
    "mamba_ssm",
    "causal-conv1d==1.0.2",
    "torch",
]
math = [
    "sympy>=1.12",
    "antlr4-python3-runtime==4.11",
    "math_verify[antlr4_11_0]",
]
multilingual = [
    "nagisa>=0.2.7",
    "jieba>=0.42.1",
    "pycountry",
]
...
...
@@ -73,20 +65,28 @@ ruler = ["nltk", "wonderwords", "scipy"]
sae_lens = ["sae_lens"]
sentencepiece = ["sentencepiece>=0.1.98"]
sparsify = ["sparsify"]
tasks = [
    "lm_eval[acpbench]",
    "lm_eval[ifeval]",
    "lm_eval[japanese_leaderboard]",
    "lm_eval[longbench]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[ruler]",
]
testing = [
    "pytest",
    "pytest-cov",
    "pytest-xdist",
]
unitxt = ["unitxt==1.22.0"]
vllm = ["vllm>=0.4.2"]
wandb = [
    "wandb>=0.16.3",
    "pandas",
    "numpy",
]
zeno = [
    "pandas",
    "zeno-client",
]
[tool.pymarkdown]
plugins.md013.enabled = false  # line-length
...
...
@@ -96,22 +96,23 @@ plugins.md028.enabled = false # no-blanks-blockquote
plugins.md029.allow_extended_start_values = true  # ol-prefix
plugins.md034.enabled = false  # no-bare-urls
[tool.ruff]
target-version = "py39"
lint.extend-select = [
    "I",
    "UP",
    "E",
    "C419",
    "F",
    "B",
    "SIM",
]
lint.fixable = [
    "I001",
    "F401",
    "UP",
]
lint.ignore = [
    "E402",
    "E731",
    "E501",
    "E111",
    "E114",
    "E117",
    "E741",
]

[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = [
    "F401",
    "F402",
    "F403",
]
[tool.ruff.lint.isort]
combine-as-imports = true
known-first-party = ["lm_eval"]
lines-after-imports = 2
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment