[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "0.4.1"
authors = [
    {name="EleutherAI", email="contact@eleuther.ai"}
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.8"
license = { text = "MIT" }
dependencies = [
    "accelerate>=0.21.0",
    "evaluate",
    "datasets>=2.14.0",
    "evaluate>=0.4.0",
    "jsonlines",
    "numexpr",
    "peft>=0.2.0",
    "pybind11>=2.6.2",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "tqdm-multiprocess",
    "transformers>=4.1",
    "zstandard",
    "dill",
    "word2number",
    "more_itertools",
]

[tool.setuptools.packages.find]
include = ["lm_eval*"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]

[project.scripts]
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"
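# Both entries above are console-script aliases for the same CLI entry point, so
# either `lm-eval` or `lm_eval` works after installation. Illustrative invocation
# (flags come from the 0.4.x CLI, not from this file):
#   lm_eval --model hf --model_args pretrained=gpt2 --tasks hellaswag --batch_size 8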

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[project.optional-dependencies]
anthropic = ["anthropic"]
dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy"]
gptq = ["auto-gptq[triton]>=0.6.0"]
hf_transfer = ["hf_transfer"]
ifeval = ["langdetect", "immutabledict"]
neuronx = ["optimum[neuronx]"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
math = ["sympy>=1.12", "antlr4-python3-runtime==4.11"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
openai = ["openai==1.3.9", "tiktoken"]
optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
vllm = ["vllm==0.3.2"]
zeno = ["pandas", "zeno-client"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
all = [
    "lm_eval[anthropic]",
    "lm_eval[dev]",
    "lm_eval[gptq]",
    "lm_eval[hf_transfer]",
    "lm_eval[ifeval]",
    "lm_eval[mamba]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[openai]",
    "lm_eval[promptsource]",
    "lm_eval[sentencepiece]",
    "lm_eval[testing]",
    "lm_eval[vllm]",
    "lm_eval[zeno]",
    "lm_eval[wandb]",
]
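
# The extras above can be combined at install time; illustrative commands
# (standard pip usage, not defined by this file):
#   pip install "lm_eval[math,vllm]"
#   pip install -e ".[dev]"   # editable install from a source checkout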

[tool.ruff.lint]
extend-select = ["I"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["lm_eval"]

[tool.ruff.lint.extend-per-file-ignores]
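# Pyflakes codes F401 (unused import), F402 (import shadowed by loop variable)
# and F403 (star import) are ignored in __init__.py files, where unused and
# star imports are commonly intentional re-exports.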
"__init__.py" = ["F401","F402","F403"]