pyproject.toml
[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "0.4.9.1"
authors = [
    {name="EleutherAI", email="contact@eleuther.ai"}
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
  "Development Status :: 3 - Alpha",
  "Programming Language :: Python :: 3",
  "License :: OSI Approved :: MIT License",
  "Operating System :: OS Independent"
]
requires-python = ">=3.9"
license = { "text" = "MIT" }
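# Core runtime dependencies; backend- and task-specific packages are provided
# as optional extras under [project.optional-dependencies] below.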
dependencies = [
  "accelerate>=0.26.0",
  "datasets>=2.16.0,<4.0",
  "evaluate>=0.4.0",
  "peft>=0.2.0",
  "pytablewriter",
  "rouge-score>=0.0.4",
  "sacrebleu>=1.5.0",
  "scikit-learn>=0.24.1",
  "sqlitedict",
  "torch>=1.8",
  "transformers>=4.1",
  "dill",
  "word2number",
  "more_itertools"
]

[tool.setuptools.packages.find]
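# only the lm_eval package (and its subpackages) is included in built distributions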
include = ["lm_eval*"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]

[project.scripts]
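# both names resolve to the same entry point, so `lm-eval` and `lm_eval`
# are interchangeable on the command line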
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[project.optional-dependencies]
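# extras are installed on demand, e.g. `pip install "lm_eval[vllm]"` or
# combined as `pip install "lm_eval[math,ifeval]"`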
acpbench = ["lark>=1.1.9", "tarski[clingo]==0.8.2", "pddl==0.4.2", "kstar-planner==1.4.2"]
api = ["requests", "aiohttp", "tenacity", "tqdm", "tiktoken"]
audiolm_qwen = ["librosa", "soundfile"]
dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "requests", "aiohttp", "tenacity", "tqdm", "tiktoken", "sentencepiece"]
gptq = ["auto-gptq[triton]>=0.6.0"]
gptqmodel = ["gptqmodel>=1.0.9"]
hf_transfer = ["hf_transfer"]
ibm_watsonx_ai = ["ibm_watsonx_ai>=1.1.22", "python-dotenv"]
ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
ipex = ["optimum"]
japanese_leaderboard = ["emoji==2.14.0", "neologdn==0.5.3", "fugashi[unidic-lite]", "rouge_score>=0.1.2"]
longbench = ["jieba", "fuzzywuzzy", "rouge"]
libra = ["pymorphy2"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2", "torch"]
math = ["sympy>=1.12", "antlr4-python3-runtime==4.11", "math_verify[antlr4_11_0]"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
neuronx = ["optimum[neuronx]"]
optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
ruler = ["nltk", "wonderwords", "scipy"]
sae_lens = ["sae_lens"]
sentencepiece = ["sentencepiece>=0.1.98"]
sparsify = ["sparsify"]
discrim_eval = ["statsmodels==0.14.4"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
unitxt = ["unitxt==1.22.0"]
vllm = ["vllm>=0.4.2"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
zeno = ["pandas", "zeno-client"]
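# `tasks` is an aggregate extra that only pulls in the task-specific extras
# defined above via self-references to this package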
tasks = [
  "lm_eval[acpbench]",
  "lm_eval[discrim_eval]",
  "lm_eval[ifeval]",
  "lm_eval[japanese_leaderboard]",
  "lm_eval[longbench]",
  "lm_eval[libra]",
  "lm_eval[mamba]",
  "lm_eval[math]",
  "lm_eval[multilingual]",
  "lm_eval[ruler]"
]

[tool.pymarkdown]
plugins.md013.enabled = false # line-length
plugins.md024.allow_different_nesting = true # no-duplicate-headers
plugins.md025.enabled = false # single-header
plugins.md028.enabled = false # no-blanks-blockquote
plugins.md029.allow_extended_start_values = true # ol-prefix
plugins.md034.enabled = false # no-bare-urls

[tool.ruff]
target-version = "py39"
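# selected rule families: I = isort, UP = pyupgrade, E = pycodestyle errors,
# C419 = flake8-comprehensions, F = Pyflakes, B = flake8-bugbear, SIM = flake8-simplify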
lint.extend-select = ["I", "UP", "E", "C419", "F", "B", "SIM"]
lint.fixable = ["I001", "F401", "UP"]
lint.ignore = ["E402", "E731", "E501", "E111", "E114", "E117", "E741"]

[tool.ruff.lint.extend-per-file-ignores]
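# F401 (unused import), F402 (shadowed import), and F403 (star import) are
# allowed in __init__.py files, which typically just re-export symbols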
"__init__.py" = ["F401", "F402", "F403"]

[tool.ruff.lint.isort]
combine-as-imports = true
known-first-party = ["lm_eval"]
lines-after-imports = 2