Commit 94dce753 authored by haileyschoelkopf

add .[all] install option

parent 4a7bbcb4
@@ -33,7 +33,6 @@ To install the `lm-eval` refactor branch from the github repository, run:
```bash
git clone https://github.com/EleutherAI/lm-evaluation-harness
cd lm-evaluation-harness
git checkout big-refactor
pip install -e .
```
@@ -49,6 +48,13 @@ To support loading GPTQ quantized models, install the package with the `gptq` extra:
pip install -e ".[gptq]" pip install -e ".[gptq]"
``` ```
To install the package with all extras, run:
```bash
pip install -e ".[all]"
```
## Support
The best way to get support is to open an issue on this repo or join the [EleutherAI discord server](discord.gg/eleutherai). The `#lm-thunderdome` channel is dedicated to developing this project and the `#release-discussion` channel is for receiving support for our releases.
...
import setuptools
import itertools
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
extras_require = {
    "dev": ["black", "flake8", "pre-commit", "pytest", "pytest-cov"],
    "linting": [
        "flake8",
        "pylint",
        "mypy",
        "pre-commit",
    ],
    "testing": ["pytest", "pytest-cov", "pytest-xdist"],
    "multilingual": ["nagisa>=0.2.7", "jieba>=0.42.1"],
    "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1"],
    "promptsource": [
        "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
    ],
    "gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
    "anthropic": ["anthropic"],
    "openai": ["openai", "tiktoken"],
}
extras_require["all"] = list(itertools.chain.from_iterable(extras_require.values()))
setuptools.setup(
    name="lm_eval",
    version="1.0.0",
@@ -50,22 +73,5 @@ setuptools.setup(
"transformers>=4.1", "transformers>=4.1",
"zstandard", "zstandard",
], ],
    extras_require={
        "dev": ["black", "flake8", "pre-commit", "pytest", "pytest-cov"],
        "linting": [
            "flake8",
            "pylint",
            "mypy",
            "pre-commit",
        ],
        "testing": ["pytest", "pytest-cov", "pytest-xdist"],
        "multilingual": ["nagisa>=0.2.7", "jieba>=0.42.1"],
        "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1"],
        "promptsource": [
            "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
        ],
        "gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
        "anthropic": ["anthropic"],
        "openai": ["openai", "tiktoken"],
    },
    extras_require=extras_require,
)
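
For readers skimming the diff, here is a minimal, self-contained sketch of what the new `all` extra does. It uses a trimmed-down copy of the extras mapping for illustration only (not the project's actual `setup.py`): `itertools.chain.from_iterable` flattens every per-extra dependency list into one combined list, which `pip install -e ".[all]"` then resolves.

```python
import itertools

# Trimmed-down copy of the extras mapping, for illustration only.
extras_require = {
    "dev": ["black", "flake8", "pre-commit", "pytest", "pytest-cov"],
    "linting": ["flake8", "pylint", "mypy", "pre-commit"],
    "openai": ["openai", "tiktoken"],
}

# Same pattern as the line added in this commit: flatten all per-extra
# dependency lists into a single "all" extra.
extras_require["all"] = list(itertools.chain.from_iterable(extras_require.values()))

print(extras_require["all"])
# ['black', 'flake8', 'pre-commit', 'pytest', 'pytest-cov',
#  'flake8', 'pylint', 'mypy', 'pre-commit', 'openai', 'tiktoken']
```

Note that requirements shared between extras (here `flake8` and `pre-commit`) appear more than once in the flattened list; pip tolerates the repeated entries, though de-duplicating them (e.g. with `sorted(set(...))`) would be an equally valid choice.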