Unverified Commit 3e69e8fc authored by Zoey, committed by GitHub

Bump CUDA 12.2.0 to 12.2.1, fix setup support for CUDA 12.1 (#703), and sort compute capability sets to select the max

* Add support for CUDA 12.1

* Update README to include CUDA 12.1 version

* Add support for CUDA >= 12.x
Co-authored-by: Jeongseok Kang <jskang@lablup.com>

* Temporary version of bitsandbytes PR 527: Sort compute capabilities sets to select max

* Modify PR 506 to support C++20

* Add CUDA 12.2

---------
Co-authored-by: PriNova <info@prinova.de>
Co-authored-by: PriNova <31413214+PriNova@users.noreply.github.com>
Co-authored-by: Jeongseok Kang <jskang@lablup.com>
parent dcfb6f81

@@ -110,7 +110,7 @@ cuda118: $(BUILD_DIR) env
 cuda12x: $(BUILD_DIR) env
 	$(NVCC) $(CC_cublasLt111) $(CC_ADA_HOPPER) -Xcompiler '-fPIC' --use_fast_math -Xptxas=-v -dc $(FILES_CUDA) $(INCLUDE) $(LIB) --output-directory $(BUILD_DIR)
 	$(NVCC) $(CC_cublasLt111) $(CC_ADA_HOPPER) -Xcompiler '-fPIC' -dlink $(BUILD_DIR)/ops.o $(BUILD_DIR)/kernels.o -o $(BUILD_DIR)/link.o
-	$(GPP) -std=c++14 -DBUILD_CUDA -shared -fPIC $(INCLUDE) $(BUILD_DIR)/ops.o $(BUILD_DIR)/kernels.o $(BUILD_DIR)/link.o $(FILES_CPP) -o ./bitsandbytes/libbitsandbytes_cuda$(CUDA_VERSION).so $(LIB)
+	$(GPP) -std=c++20 -DBUILD_CUDA -shared -fPIC $(INCLUDE) $(BUILD_DIR)/ops.o $(BUILD_DIR)/kernels.o $(BUILD_DIR)/link.o $(FILES_CPP) -o ./bitsandbytes/libbitsandbytes_cuda$(CUDA_VERSION).so $(LIB)
 cpuonly: $(BUILD_DIR) env
 	$(GPP) -std=c++14 -shared -fPIC -I $(ROOT_DIR)/csrc -I $(ROOT_DIR)/include $(FILES_CPP) -o ./bitsandbytes/libbitsandbytes_cpu.so

@@ -26,7 +26,7 @@ Compilation quickstart:
 git clone https://github.com/timdettmers/bitsandbytes.git
 cd bitsandbytes
-# CUDA_VERSIONS in {110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 120}
+# CUDA_VERSIONS in {110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
 # make argument in {cuda110, cuda11x, cuda12x}
 # if you do not know what CUDA you have, try looking at the output of: python -m bitsandbytes
 CUDA_VERSION=117 make cuda11x

@@ -83,7 +83,7 @@ Hardware requirements:
 - LLM.int8(): NVIDIA Turing (RTX 20xx; T4) or Ampere GPU (RTX 30xx; A4-A100); (a GPU from 2018 or newer).
 - 8-bit optimizers and quantization: NVIDIA Kepler GPU or newer (>=GTX 78X).
-Supported CUDA versions: 10.2 - 12.0
+Supported CUDA versions: 10.2 - 12.2
 The bitsandbytes library is currently only supported on Linux distributions. Windows is not supported at the moment.

@@ -31,7 +31,7 @@ from .env_vars import get_potentially_lib_path_containing_env_vars
 # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
 # we have libcudart.so.11.0 which causes a lot of errors before
 # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
-CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
+CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
 # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
 backup_paths = []
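
The hunk above only extends the list of accepted libcudart filenames; the setup then scans candidate directories for any of these names. A minimal sketch of that idea, assuming a plain LD_LIBRARY_PATH scan (the `find_cuda_runtime` helper is illustrative, not the actual bitsandbytes lookup code):

```python
import os
from pathlib import Path

# Candidate CUDA runtime filenames, mirroring the list extended in the hunk above.
CUDA_RUNTIME_LIBS = [
    "libcudart.so", "libcudart.so.11.0", "libcudart.so.12.0",
    "libcudart.so.12.1", "libcudart.so.12.2",
]

def find_cuda_runtime(search_dirs=None):
    """Return the first libcudart variant found in the given directories, else None."""
    if search_dirs is None:
        # LD_LIBRARY_PATH is a colon-separated list of directories.
        search_dirs = os.environ.get("LD_LIBRARY_PATH", "").split(":")
    for directory in filter(None, search_dirs):
        for name in CUDA_RUNTIME_LIBS:
            candidate = Path(directory) / name
            if candidate.is_file():
                return candidate
    return None

if __name__ == "__main__":
    print(find_cuda_runtime() or "no libcudart found on LD_LIBRARY_PATH")
```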

@@ -77,6 +77,8 @@ class CUDASetup:
             make_cmd += ' make cuda110'
         elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
             make_cmd += ' make cuda11x'
+        elif self.cuda_version_string[:2] == '12' and 1 >= int(self.cuda_version_string[2]) >= 0:
+            make_cmd += ' make cuda12x'
         elif self.cuda_version_string == '100':
             self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
             self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
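
The two added lines route CUDA 12.0 and 12.1 setups to the cuda12x build target in the suggested make command. A standalone sketch of that dispatch, assuming a hypothetical `pick_make_target` helper (the real code appends to a make command string inside CUDASetup):

```python
def pick_make_target(cuda_version_string: str) -> str:
    """Map a three-digit CUDA version string such as '117' or '121' to a make target.

    Mirrors the elif chain shown in the hunk above; illustrative only.
    """
    major, minor = cuda_version_string[:2], int(cuda_version_string[2])
    if cuda_version_string == "110":
        # Condition inferred from the 'make cuda110' context line above.
        return "make cuda110"
    if major == "11" and minor > 0:
        return "make cuda11x"
    if major == "12" and 1 >= minor >= 0:
        # The branch added by this commit covers 12.0 and 12.1.
        return "make cuda12x"
    raise ValueError(f"unsupported CUDA version string: {cuda_version_string!r}")

assert pick_make_target("118") == "make cuda11x"
assert pick_make_target("121") == "make cuda12x"  # newly handled by this commit
```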

@@ -327,6 +329,8 @@ def get_compute_capabilities():
         cc_major, cc_minor = torch.cuda.get_device_capability(torch.cuda.device(i))
         ccs.append(f"{cc_major}.{cc_minor}")
+    ccs.sort(key=lambda v: tuple(map(int, str(v).split("."))))
     return ccs
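
This is the PR 527 change from the commit message: the collected capabilities are sorted numerically so the maximum ends up last. A small illustration of the effect (the example values and the `ccs[-1]` selection are assumptions for demonstration; the commit message only states that the sorted set is used to select the max):

```python
# Sort compute capabilities numerically so the highest one ends up last.
# The tuple key is what the hunk above adds; a plain string sort would put
# "10.0" before "8.6" because it compares character by character.
ccs = ["8.6", "7.5", "8.0"]

ccs.sort(key=lambda v: tuple(map(int, str(v).split("."))))
print(ccs)      # ['7.5', '8.0', '8.6']
print(ccs[-1])  # '8.6' -- the maximum capability, e.g. to target the newest GPU
```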

@@ -11,7 +11,7 @@ You can install CUDA locally without sudo by following the following steps:
 ```bash
 wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/install_cuda.sh
 # Syntax cuda_install CUDA_VERSION INSTALL_PREFIX EXPORT_TO_BASH
-# CUDA_VERSION in {110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121}
+# CUDA_VERSION in {110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121, 122}
 # EXPORT_TO_BASH in {0, 1} with 0=False and 1=True
 # For example, the following installs CUDA 11.7 to ~/local/cuda-11.7 and exports the path to your .bashrc

@@ -12,8 +12,8 @@ URL116=https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installer
 URL117=https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda_11.7.0_515.43.04_linux.run
 URL118=https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
 URL120=https://developer.download.nvidia.com/compute/cuda/12.0.0/local_installers/cuda_12.0.0_525.60.13_linux.run
-URL121=https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run
-URL122=https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run
+URL121=https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run
+URL122=https://developer.download.nvidia.com/compute/cuda/12.2.1/local_installers/cuda_12.2.1_535.86.10_linux.run
 URL123=https://developer.download.nvidia.com/compute/cuda/12.3.1/local_installers/cuda_12.3.1_545.23.08_linux.run
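
The bumped entries pin CUDA 12.1 and 12.2 to their 12.1.1 and 12.2.1 point-release installers. For illustration, the same lookup expressed in Python with only the two bumped keys (the `installer_url` helper is hypothetical; the real file is a bash table consumed by install_cuda.sh):

```python
# Installer URLs keyed by three-digit CUDA version, mirroring the bumped
# URL121/URL122 entries above (12.1.1 and 12.2.1 point releases).
CUDA_INSTALLER_URLS = {
    "121": "https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run",
    "122": "https://developer.download.nvidia.com/compute/cuda/12.2.1/local_installers/cuda_12.2.1_535.86.10_linux.run",
}

def installer_url(cuda_version: str) -> str:
    """Return the local installer URL for a version string such as '122'."""
    if cuda_version not in CUDA_INSTALLER_URLS:
        raise ValueError(f"no installer URL configured for CUDA {cuda_version}")
    return CUDA_INSTALLER_URLS[cuda_version]

print(installer_url("122"))
```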