Commit cb02853e authored by Woosuk Kwon

Pin PyTorch and CUDA versions

parent 0e892ad6
@@ -263,6 +263,9 @@ class NinjaBuildExtension(BuildExtension):
         super().__init__(*args, **kwargs)
 
 
+PYTORCH_VERSION = "2.2.1"
+CUDA_VERSION = "12.1"
+
 setup(
     name="vllm-flash-attn",
     version=get_package_version(),
@@ -280,7 +283,7 @@ setup(
     ),
     author="vLLM Team",
     description="Forward-only flash-attn",
-    long_description="Forward-only flash-attn package built for PyTorch 2.1.2 and CUDA 12.1",
+    long_description=f"Forward-only flash-attn package built for PyTorch {PYTORCH_VERSION} and CUDA {CUDA_VERSION}",
     url="https://github.com/vllm-project/flash-attention.git",
     classifiers=[
         "Programming Language :: Python :: 3",
@@ -294,6 +297,6 @@ setup(
         "bdist_wheel": CachedWheelsCommand,
     },
     python_requires=">=3.8",
-    install_requires=["torch == 2.2.1"],
+    install_requires=[f"torch == {PYTORCH_VERSION}"],
     setup_requires=["psutil"],
 )
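
For reference, a minimal standalone sketch (not part of the commit itself; the constant names are taken from the diff above) showing how the pinned constants expand into the strings that setuptools receives, so the exact-version pin only has to be updated in one place:

# Sketch only: mirrors the pattern introduced in the diff above.
PYTORCH_VERSION = "2.2.1"
CUDA_VERSION = "12.1"

install_requires = [f"torch == {PYTORCH_VERSION}"]
long_description = (
    f"Forward-only flash-attn package built for PyTorch {PYTORCH_VERSION} "
    f"and CUDA {CUDA_VERSION}"
)

print(install_requires)    # ['torch == 2.2.1'] -- an exact version pin for pip
print(long_description)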