gaoqiong / lm-evaluation-harness · Commits · ce9ba47e

Commit ce9ba47e (unverified)
Authored Mar 19, 2025 by Kiersten Stokes, committed by GitHub on Mar 19, 2025
Parent: 7123c6a5

Clean up README and pyproject.toml (#2814)
Showing 2 changed files with 67 additions and 56 deletions:

README.md       +50 -44
pyproject.toml  +17 -12
README.md @ ce9ba47e: diff collapsed (not expanded on this page).
pyproject.toml @ ce9ba47e:

@@ -59,54 +59,59 @@ Repository = "https://github.com/EleutherAI/lm-evaluation-harness"
 [project.optional-dependencies]
 api = ["requests", "aiohttp", "tenacity", "tqdm", "tiktoken"]
 audiolm_qwen = ["librosa", "soundfile"]
-dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy", "unitxt"]
 deepsparse = ["deepsparse-nightly[llm]>=1.8.0.20240404"]
+dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy", "unitxt"]
 gptq = ["auto-gptq[triton]>=0.6.0"]
+gptqmodel = ["gptqmodel>=1.0.9"]
 hf_transfer = ["hf_transfer"]
 ibm_watsonx_ai = ["ibm_watsonx_ai>=1.1.22", "python-dotenv"]
 ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
+ipex = ["optimum"]
+japanese_leaderboard = ["emoji==2.14.0", "neologdn==0.5.3", "fugashi[unidic-lite]", "rouge_score>=0.1.2"]
 longbench = ["jeiba", "fuzzywuzzy", "rouge"]
-neuronx = ["optimum[neuronx]"]
 mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
 math = ["sympy>=1.12", "antlr4-python3-runtime==4.11", "math_verify[antlr4_11_0]"]
 multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
+neuronx = ["optimum[neuronx]"]
 optimum = ["optimum[openvino]"]
 promptsource = ["promptsource>=0.2.3"]
 ruler = ["nltk", "wonderwords", "scipy"]
 sae_lens = ["sae_lens"]
 sentencepiece = ["sentencepiece>=0.1.98"]
-sparsify = ["sparsify"]
 sparseml = ["sparseml-nightly[llm]>=1.8.0.20240404"]
+sparsify = ["sparsify"]
 testing = ["pytest", "pytest-cov", "pytest-xdist"]
 vllm = ["vllm>=0.4.2"]
-zeno = ["pandas", "zeno-client"]
 wandb = ["wandb>=0.16.3", "pandas", "numpy"]
-gptqmodel = ["gptqmodel>=1.0.9"]
-japanese_leaderboard = ["emoji==2.14.0", "neologdn==0.5.3", "fugashi[unidic-lite]", "rouge_score>=0.1.2"]
+zeno = ["pandas", "zeno-client"]
 all = [
-    "lm_eval[anthropic]",
-    "lm_eval[dev]",
+    "lm_eval[api]",
+    "lm_eval[audiolm_qwen]",
     "lm_eval[deepsparse]",
+    "lm_eval[dev]",
     "lm_eval[gptq]",
+    "lm_eval[gptqmodel]",
     "lm_eval[hf_transfer]",
     "lm_eval[ibm_watsonx_ai]",
     "lm_eval[ifeval]",
+    "lm_eval[ipex]",
+    "lm_eval[japanese_leaderboard]",
     "lm_eval[longbench]",
     "lm_eval[mamba]",
     "lm_eval[math]",
     "lm_eval[multilingual]",
-    "lm_eval[openai]",
+    "lm_eval[neuronx]",
+    "lm_eval[optimum]",
     "lm_eval[promptsource]",
     "lm_eval[ruler]",
     "lm_eval[sae_lens]",
     "lm_eval[sentencepiece]",
-    "lm_eval[sparsify]",
     "lm_eval[sparseml]",
+    "lm_eval[sparsify]",
     "lm_eval[testing]",
     "lm_eval[vllm]",
-    "lm_eval[zeno]",
     "lm_eval[wandb]",
-    "lm_eval[japanese_leaderboard]",
+    "lm_eval[zeno]",
 ]

 [tool.ruff.lint]
...
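
For context (not part of the commit itself): the groups declared under [project.optional-dependencies] map directly to pip's extras syntax, and the "all" entry is the package's own self-referencing extra that pulls in every other group. A minimal usage sketch, assuming installation from PyPI or an editable checkout of this repository:

    pip install "lm_eval[api]"        # pulls in requests, aiohttp, tenacity, tqdm, tiktoken
    pip install -e ".[dev,testing]"   # editable checkout with the dev tooling and test runners
    pip install "lm_eval[all]"        # installs every extra group listed in the diff above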