[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "0.4.8"
authors = [
    { name = "EleutherAI", email = "contact@eleuther.ai" },
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.9"
license = { text = "MIT" }
# Core runtime requirements, sorted alphabetically.
# "evaluate" was previously listed twice (unpinned and ">=0.4.0");
# deduplicated to the stricter specifier.
dependencies = [
    "accelerate>=0.26.0",
    "datasets>=2.16.0",
    "dill",
    "evaluate>=0.4.0",
    "jsonlines",
    "more_itertools",
    "numexpr",
    "peft>=0.2.0",
    "pybind11>=2.6.2",
    "pydantic",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "tqdm-multiprocess",
    "transformers>=4.1",
    "word2number",
    "zstandard",
]

[project.scripts]
# Both spellings are deliberate aliases for the same CLI entry point.
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[project.optional-dependencies]
api = ["requests", "aiohttp", "tenacity", "tqdm", "tiktoken"]
audiolm_qwen = ["librosa", "soundfile"]
deepsparse = ["deepsparse-nightly[llm]>=1.8.0.20240404"]
dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy", "unitxt", "requests", "aiohttp", "tenacity", "tqdm", "tiktoken", "sentencepiece"]
gptq = ["auto-gptq[triton]>=0.6.0"]
gptqmodel = ["gptqmodel>=1.0.9"]
hf_transfer = ["hf_transfer"]
ibm_watsonx_ai = ["ibm_watsonx_ai>=1.1.22", "python-dotenv"]
ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
ipex = ["optimum"]
japanese_leaderboard = ["emoji==2.14.0", "neologdn==0.5.3", "fugashi[unidic-lite]", "rouge_score>=0.1.2"]
longbench = ["jieba", "fuzzywuzzy", "rouge"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2", "torch"]
math = ["sympy>=1.12", "antlr4-python3-runtime==4.11", "math_verify[antlr4_11_0]"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
neuronx = ["optimum[neuronx]"]
optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
ruler = ["nltk", "wonderwords", "scipy"]
sae_lens = ["sae_lens"]
sentencepiece = ["sentencepiece>=0.1.98"]
sparseml = ["sparseml-nightly[llm]>=1.8.0.20240404"]
sparsify = ["sparsify"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
vllm = ["vllm>=0.4.2"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
zeno = ["pandas", "zeno-client"]
# Convenience extra that pulls in every optional feature above.
all = [
    "lm_eval[api]",
    "lm_eval[audiolm_qwen]",
    "lm_eval[deepsparse]",
    "lm_eval[dev]",
    "lm_eval[gptq]",
    "lm_eval[gptqmodel]",
    "lm_eval[hf_transfer]",
    "lm_eval[ibm_watsonx_ai]",
    "lm_eval[ifeval]",
    "lm_eval[ipex]",
    "lm_eval[japanese_leaderboard]",
    "lm_eval[longbench]",
    "lm_eval[mamba]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[neuronx]",
    "lm_eval[optimum]",
    "lm_eval[promptsource]",
    "lm_eval[ruler]",
    "lm_eval[sae_lens]",
    "lm_eval[sentencepiece]",
    "lm_eval[sparseml]",
    "lm_eval[sparsify]",
    "lm_eval[testing]",
    "lm_eval[vllm]",
    "lm_eval[wandb]",
    "lm_eval[zeno]",
]

[tool.setuptools.packages.find]
include = ["lm_eval*"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]

[tool.pymarkdown]
plugins.md013.enabled = false # line-length
plugins.md024.allow_different_nesting = true # no-duplicate-headers
plugins.md025.enabled = false # single-header
plugins.md028.enabled = false # no-blanks-blockquote
plugins.md029.allow_extended_start_values = true # ol-prefix
plugins.md034.enabled = false # no-bare-urls

[tool.ruff.lint]
extend-select = ["I"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["lm_eval"]

[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401", "F402", "F403"]
"utils.py" = ["F401"]

# PEP 735 dependency groups.
# NOTE(review): bare names in a dependency group are resolved as PyPI
# packages, not as this project's extras. If the intent was to install the
# "api"/"dev"/"sentencepiece" extras defined above, this should be
# ["lm_eval[api,dev,sentencepiece]"] — confirm with maintainers.
[dependency-groups]
dev = [
    "api",
    "dev",
    "sentencepiece",
]