gaoqiong/lm-evaluation-harness · Commit 5d4ac134 (unverified)

Authored Sep 14, 2023 by Hailey Schoelkopf; committed by GitHub on Sep 14, 2023.

Merge pull request #854 from ethanhs/pyproject_toml_v2

Switch to pyproject.toml based project metadata
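With the metadata declared statically in pyproject.toml, any TOML-aware tool can read it without executing setup.py. As a minimal sketch (assuming Python 3.11+ for the stdlib tomllib module; on the 3.9/3.10 interpreters this project also supports, the third-party tomli package provides the same API), run from the repository root:

```python
# Minimal sketch: read the now-static project metadata without running setup.py.
# Assumes Python 3.11+ for the stdlib "tomllib" module (use the third-party
# "tomli" package on 3.9/3.10) and that we run from the repository root.
import tomllib

with open("pyproject.toml", "rb") as f:  # tomllib requires a binary file handle
    meta = tomllib.load(f)

project = meta["project"]
print(project["name"], project["version"])  # lm_eval 1.0.0
print(project["requires-python"])           # >=3.9
print(len(project["dependencies"]), "runtime dependencies")
```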
Parents: 6ba2a2b0, 0594fe2b

Showing 2 changed files with 85 additions and 74 deletions:
- pyproject.toml (+83, -0)
- setup.py (+2, -74)
pyproject.toml @ 5d4ac134
```toml
[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "1.0.0"
authors = [
    {name="EleutherAI", email="contact@eleuther.ai"}
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.9"
license = { "text" = "MIT" }
dependencies = [
    "accelerate>=0.21.0",
    "evaluate",
    "datasets>=2.0.0",
    "evaluate>=0.4.0",
    "jsonlines",
    "numexpr",
    "omegaconf>=2.2",
    "peft>=0.2.0",
    "pybind11>=2.6.2",
    "pycountry",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "tqdm-multiprocess",
    "transformers>=4.1",
    "zstandard",
]

[tool.setuptools]
packages = ["lm_eval"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]
examples = ["**/*.yaml"]

[project.scripts]
lm-eval = "main:main"
lm_eval = "main:main"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[project.optional-dependencies]
dev = ["black", "flake8", "pre-commit", "pytest", "pytest-cov"]
linting = ["flake8", "pylint", "mypy", "pre-commit"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1"]
sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1", "pycountry"]
promptsource = [
    "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
]
gptq = ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"]
anthropic = ["anthropic"]
openai = ["openai", "tiktoken"]
all = [
    "lm_eval[dev]",
    "lm_eval[testing]",
    "lm_eval[linting]",
    "lm_eval[multilingual]",
    "lm_eval[sentencepiece]",
    "lm_eval[promptsource]",
    "lm_eval[gptq]",
    "lm_eval[anthropic]",
    "lm_eval[openai]",
]
```
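The two [project.scripts] entries replace the old entry_points table in setup.py. Installers generate thin console wrappers from them; roughly, and only as an illustration rather than the installer's literal output, the lm-eval wrapper behaves like this sketch:

```python
# Illustrative sketch of the console wrapper an installer generates for
# "lm-eval = main:main": import module "main", call attribute "main",
# and exit with its return value. Not the literal generated file.
import sys
from main import main

if __name__ == "__main__":
    sys.exit(main())
```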
setup.py @ 5d4ac134
```diff
 import setuptools
-import itertools
 
-with open("README.md", "r", encoding="utf-8") as fh:
-    long_description = fh.read()
-
-extras_require = {
-    "dev": ["black", "flake8", "pre-commit", "pytest", "pytest-cov"],
-    "linting": ["flake8", "pylint", "mypy", "pre-commit"],
-    "testing": ["pytest", "pytest-cov", "pytest-xdist"],
-    "multilingual": ["nagisa>=0.2.7", "jieba>=0.42.1"],
-    "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1", "pycountry"],
-    "promptsource": [
-        "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
-    ],
-    "gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
-    "anthropic": ["anthropic"],
-    "openai": ["openai", "tiktoken"],
-}
-extras_require["all"] = list(itertools.chain.from_iterable(extras_require.values()))
-
-setuptools.setup(
-    name="lm_eval",
-    version="1.0.0",
-    author="EleutherAI",
-    author_email="contact@eleuther.ai",
-    description="A framework for evaluating language models",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://github.com/EleutherAI/lm-evaluation-harness",
-    packages=setuptools.find_packages(),
-    # required to include yaml files in pip installation
-    package_data={
-        "lm_eval": ["**/*.yaml", "tasks/**/*"],
-        "examples": ["**/*.yaml"],
-    },
-    entry_points={
-        "console_scripts": [
-            "lm-eval = main:main",
-            "lm_eval = main:main",
-        ],
-    },
-    include_package_data=True,
-    classifiers=[
-        "Development Status :: 3 - Alpha",
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
-        "Operating System :: OS Independent",
-    ],
-    python_requires=">=3.9",
-    install_requires=[
-        "accelerate>=0.21.0",
-        "evaluate",
-        "datasets>=2.0.0",
-        "evaluate>=0.4.0",
-        "jsonlines",
-        "numexpr",
-        "omegaconf>=2.2",
-        "peft>=0.2.0",
-        "pybind11>=2.6.2",
-        "pytablewriter",
-        "rouge-score>=0.0.4",
-        "sacrebleu>=1.5.0",
-        "scikit-learn>=0.24.1",
-        "sqlitedict",
-        "torch>=1.8",
-        "tqdm-multiprocess",
-        "transformers>=4.1",
-        "zstandard",
-    ],
-    extras_require=extras_require,
-)
+# This is to make sure that the package supports editable installs
+setuptools.setup()
```
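After this change setup.py is only a compatibility shim: a bare setuptools.setup() call lets setuptools pull all metadata from pyproject.toml, which older pip versions still need for editable installs. One way to check that the published metadata survived the move, assuming the package has already been installed (for example with pip install -e .), is a sketch like this:

```python
# Sketch: inspect the installed distribution's metadata, which should now be
# sourced from pyproject.toml. Assumes "lm_eval" has already been installed.
from importlib.metadata import metadata, version

print(version("lm_eval"))       # 1.0.0
info = metadata("lm_eval")
print(info["Summary"])          # A framework for evaluating language models
print(info["Requires-Python"])  # >=3.9
```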