Commit 43388406 authored by Baber's avatar Baber
Browse files

sort pyproject

parent 7c853109
[build-system]
build-backend = "setuptools.build_meta"
requires = ["setuptools>=40.8.0", "wheel"]
[project]
authors = [
    {email = "contact@eleuther.ai", name = "EleutherAI"}
]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent"
]
dependencies = [
    "accelerate>=0.26.0",
    "datasets>=2.16.0",
...@@ -32,23 +26,21 @@ dependencies = [ ...@@ -32,23 +26,21 @@ dependencies = [
    "transformers>=4.1",
    "dill",
    "word2number",
    "more_itertools"
]
description = "A framework for evaluating language models"
license = {"text" = "MIT"}
name = "lm_eval"
readme = "README.md"
requires-python = ">=3.9"
version = "0.4.9"
[dependency-groups]
dev = [
    "api",
    "dev",
    "sentencepiece"
]
[project.optional-dependencies]
acpbench = ["lark>=1.1.9", "tarski[clingo]==0.8.2", "pddl==0.4.2", "kstar-planner==1.4.2"]
...@@ -62,7 +54,7 @@ ibm_watsonx_ai = ["ibm_watsonx_ai>=1.1.22", "python-dotenv"] ...@@ -62,7 +54,7 @@ ibm_watsonx_ai = ["ibm_watsonx_ai>=1.1.22", "python-dotenv"]
ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
ipex = ["optimum"]
japanese_leaderboard = ["emoji==2.14.0", "neologdn==0.5.3", "fugashi[unidic-lite]", "rouge_score>=0.1.2"]
longbench = ["jieba", "fuzzywuzzy", "rouge"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2", "torch"]
math = ["sympy>=1.12", "antlr4-python3-runtime==4.11", "math_verify[antlr4_11_0]"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
...@@ -73,11 +65,6 @@ ruler = ["nltk", "wonderwords", "scipy"] ...@@ -73,11 +65,6 @@ ruler = ["nltk", "wonderwords", "scipy"]
sae_lens = ["sae_lens"]
sentencepiece = ["sentencepiece>=0.1.98"]
sparsify = ["sparsify"]
tasks = [
    "lm_eval[acpbench]",
    "lm_eval[ifeval]",
...@@ -85,8 +72,21 @@ tasks = [ ...@@ -85,8 +72,21 @@ tasks = [
    "lm_eval[longbench]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[ruler]"
]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
unitxt = ["unitxt==1.22.0"]
vllm = ["vllm>=0.4.2"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
zeno = ["pandas", "zeno-client"]
[project.scripts]
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"
[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"
[tool.pymarkdown]
plugins.md013.enabled = false  # line-length
...@@ -96,22 +96,23 @@ plugins.md028.enabled = false # no-blanks-blockquote ...@@ -96,22 +96,23 @@ plugins.md028.enabled = false # no-blanks-blockquote
plugins.md029.allow_extended_start_values = true  # ol-prefix
plugins.md034.enabled = false  # no-bare-urls
[tool.ruff]
target-version = "py39"
lint.extend-select = ["I", "UP", "E", "C419", "F", "B", "SIM"]
lint.fixable = ["I001", "F401", "UP"]
lint.ignore = ["E402", "E731", "E501", "E111", "E114", "E117", "E741"]

[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401", "F402", "F403"]
[tool.ruff.lint.isort]
combine-as-imports = true
known-first-party = ["lm_eval"]
lines-after-imports = 2
# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]

[tool.setuptools.packages.find]
include = ["lm_eval*"]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment