"docs/git@developer.sourcefind.cn:OpenDAS/lmdeploy.git" did not exist on "ec034c15023ca0412a91aeddd8aad164e155b695"
Commit 9fc0ab41 authored by Max Ryabinin

Remove unused code

parent 9d60b3c5
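
The commit deletes dead code from five modules: an unused `import bitsandbytes as bnb` and the unreachable `matmul = MatMul8bitLt.apply` alias in the 8-bit autograd functions; an abandoned ctypes-based `CudaLibVals` helper, two unused imports, and an unused `result` variable in the CUDA setup module; a stubbed-out `purge_unwanted_semicolon` helper in the CUDA path discovery code; an unreferenced `GlobalData` singleton and an unused `import math` in the functional helpers; and the unused `warn_of_missing_prerequisite` utility.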
```diff
 import operator
 import torch

-import bitsandbytes as bnb
 import bitsandbytes.functional as F

 from dataclasses import dataclass

@@ -378,9 +377,6 @@ class MatMul8bitLt(torch.autograd.Function):
         return grad_A, grad_B, None, grad_bias, None

-matmul = MatMul8bitLt.apply

 def matmul(
     A: tensor,
     B: tensor,
```
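The removed alias was unreachable: the module rebinds the name `matmul` with the `def matmul(...)` wrapper immediately below it, so the bare `MatMul8bitLt.apply` binding was never visible to callers. A minimal sketch of the pattern, with a toy `Square` function standing in for `MatMul8bitLt` (all names here are illustrative, not from the repository):

```python
import torch

class Square(torch.autograd.Function):
    # toy custom autograd op: y = x^2, dy/dx = 2x
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_out

square = Square.apply  # dead binding: the def below rebinds the same name

def square(x: torch.Tensor) -> torch.Tensor:
    # wrapper in the style of bitsandbytes' matmul(): defaults/validation go here
    return Square.apply(x)
```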
```diff
-import ctypes
-from dataclasses import dataclass, field
-
-
-@dataclass
-class CudaLibVals:
-    # code bits taken from
-    # https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
-
-    nGpus: ctypes.c_int = field(default=ctypes.c_int())
-    cc_major: ctypes.c_int = field(default=ctypes.c_int())
-    cc_minor: ctypes.c_int = field(default=ctypes.c_int())
-    device: ctypes.c_int = field(default=ctypes.c_int())
-    error_str: ctypes.c_char_p = field(default=ctypes.c_char_p())
-    cuda: ctypes.CDLL = field(init=False, repr=False)
-    ccs: List[str, ...] = field(init=False)
-
-    def _initialize_driver_API(self):
-        self.check_cuda_result(self.cuda.cuInit(0))
-
-    def _load_cuda_lib(self):
-        """
-        1. find libcuda.so library (GPU driver) (/usr/lib)
-        init_device -> init variables -> call function by reference
-        """
-        libnames = "libcuda.so"
-        for libname in libnames:
-            try:
-                self.cuda = ctypes.CDLL(libname)
-            except OSError:
-                continue
-            else:
-                break
-        else:
-            raise OSError("could not load any of: " + " ".join(libnames))
-
-    def call_cuda_func(self, function_obj, **kwargs):
-        CUDA_SUCCESS = 0  # constant taken from cuda.h
-        pass
-        # if (CUDA_SUCCESS := function_obj(
-
-    def _error_handle(cuda_lib_call_return_value):
-        """
-        2. call extern C function to determine CC
-        (see https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
-        """
-        CUDA_SUCCESS = 0  # constant taken from cuda.h
-
-        if cuda_lib_call_return_value != CUDA_SUCCESS:
-            self.cuda.cuGetErrorString(
-                cuda_lib_call_return_value,
-                ctypes.byref(self.error_str),
-            )
-            print("Count not initialize CUDA - failure!")
-            raise Exception("CUDA exception!")
-        return cuda_lib_call_return_value
-
-    def __post_init__(self):
-        self._load_cuda_lib()
-        self._initialize_driver_API()
-        self.check_cuda_result(
-            self.cuda, self.cuda.cuDeviceGetCount(ctypes.byref(self.nGpus))
-        )
-        tmp_ccs = []
-        for gpu_index in range(self.nGpus.value):
-            check_cuda_result(
-                self.cuda,
-                self.cuda.cuDeviceGet(ctypes.byref(self.device), gpu_index),
-            )
-            check_cuda_result(
-                self.cuda,
-                self.cuda.cuDeviceComputeCapability(
-                    ctypes.byref(self.cc_major),
-                    ctypes.byref(self.cc_minor),
-                    self.device,
-                ),
-            )
-            tmp_ccs.append(f"{self.cc_major.value}.{self.cc_minor.value}")
-        self.ccs = sorted(tmp_ccs, reverse=True)
```
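Nothing ever instantiated `CudaLibVals`, and it would not have worked as written: `libnames` is a single string, so `for libname in libnames` iterates over its characters, and `_error_handle` references `self` without taking it as a parameter. For reference, the approach from the gist it cites can be written as a compact, working sketch. This hypothetical `query_compute_capabilities` is not the repository's function; it assumes a Linux system with the NVIDIA driver installed (`cuDeviceComputeCapability` is deprecated in favor of `cuDeviceGetAttribute`, but still exported):

```python
import ctypes

def query_compute_capabilities():
    """Return each visible GPU's compute capability, highest first."""
    CUDA_SUCCESS = 0  # constant taken from cuda.h

    cuda = None
    for libname in ("libcuda.so", "libcuda.so.1"):  # a tuple, not a bare string
        try:
            cuda = ctypes.CDLL(libname)
            break
        except OSError:
            continue
    if cuda is None:
        raise OSError("could not load any of: libcuda.so, libcuda.so.1")

    def check(result):
        # raise with the driver's own error string on any non-zero return code
        if result != CUDA_SUCCESS:
            error_str = ctypes.c_char_p()
            cuda.cuGetErrorString(result, ctypes.byref(error_str))
            raise RuntimeError(f"CUDA driver call failed: {error_str.value}")

    n_gpus = ctypes.c_int()
    cc_major, cc_minor = ctypes.c_int(), ctypes.c_int()
    device = ctypes.c_int()

    check(cuda.cuInit(0))
    check(cuda.cuDeviceGetCount(ctypes.byref(n_gpus)))

    ccs = []
    for gpu_index in range(n_gpus.value):
        check(cuda.cuDeviceGet(ctypes.byref(device), gpu_index))
        check(cuda.cuDeviceComputeCapability(
            ctypes.byref(cc_major), ctypes.byref(cc_minor), device
        ))
        ccs.append(f"{cc_major.value}.{cc_minor.value}")
    return sorted(ccs, reverse=True)
```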
```diff
@@ -17,9 +17,7 @@ evaluation:
 """
 import ctypes

-from pathlib import Path
-from ..utils import execute_and_return
 from .paths import determine_cuda_runtime_lib_path

@@ -80,7 +78,6 @@ def get_compute_capabilities(cuda):
     cc_major = ctypes.c_int()
     cc_minor = ctypes.c_int()
-    result = ctypes.c_int()
     device = ctypes.c_int()

     check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
```
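The dropped `result` variable was an out-parameter that no remaining driver call consumed. Both hunks route driver calls through `check_cuda_result(cuda, ...)`, whose body sits outside this diff; presumably it compares the return value against `CUDA_SUCCESS` and raises with the message from `cuGetErrorString`, roughly along these lines (an assumption, not the repository's actual implementation):

```python
import ctypes

def check_cuda_result(cuda, result_val):
    # CUDA driver API calls return 0 (CUDA_SUCCESS) on success
    if result_val != 0:
        error_str = ctypes.c_char_p()
        cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
        raise Exception(f"CUDA error {result_val}: {error_str.value}")
```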
```diff
@@ -5,20 +5,9 @@ from warnings import warn
 from ..utils import print_stderr
 from .env_vars import get_potentially_lib_path_containing_env_vars

 CUDA_RUNTIME_LIB: str = "libcudart.so"

-def purge_unwanted_semicolon(tentative_path: Path) -> Path:
-    """
-    Special function to handle the following exception:
-    __LMOD_REF_COUNT_PATH=/sw/cuda/11.6.2/bin:2;/mmfs1/home/dettmers/git/sched/bin:1;/mmfs1/home/dettmers/data/anaconda3/bin:1;/mmfs1/home/dettmers/data/anaconda3/condabin:1;/mmfs1/home/dettmers/.local/bin:1;/mmfs1/home/dettmers/bin:1;/usr/local/bin:1;/usr/bin:1;/usr/local/sbin:1;/usr/sbin:1;/mmfs1/home/dettmers/.fzf/bin:1;/mmfs1/home/dettmers/data/local/cuda-11.4/bin:1
-    """
-    # if ';' in str(tentative_path):
-    #     path_as_str, _ = str(tentative_path).split(';')
-    pass

 def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
     return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
```
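The deleted stub documents a real quirk it never handled: Lmod-managed environments export variables such as `__LMOD_REF_COUNT_PATH`, where each path carries a `:<count>` suffix and entries are joined with `;`. A sketch of the sanitizing it gestured at (a hypothetical helper, not repository code):

```python
from pathlib import Path
from typing import Set

def strip_lmod_ref_counts(raw: str) -> Set[Path]:
    # e.g. "/sw/cuda/11.6.2/bin:2;/usr/local/bin:1"
    #   -> {Path("/sw/cuda/11.6.2/bin"), Path("/usr/local/bin")}
    paths = set()
    for entry in raw.split(";"):
        path_part, _, _ref_count = entry.rpartition(":")
        if path_part:
            paths.add(Path(path_part))
    return paths
```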
```diff
@@ -117,8 +106,6 @@ def determine_cuda_runtime_lib_path() -> Union[Path, None]:
         if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
     }

-    cuda_runtime_libs = set()
     for env_var, value in remaining_candidate_env_vars.items():
         cuda_runtime_libs.update(find_cuda_lib_in(value))
```
```diff
@@ -5,7 +5,6 @@
 import ctypes as ct
 import operator
 import random
-import math

 import torch
 from typing import Tuple

@@ -248,23 +247,6 @@ def get_transform_func(dtype, orderA, orderOut, transpose=False):
     return getattr(lib, name)

-class GlobalData(object):
-    _instance = None
-
-    def __init__(self):
-        raise RuntimeError("Call get_instance() instead")
-
-    def initialize(self):
-        self.data = {}
-
-    @classmethod
-    def get_instance(cls):
-        if cls._instance is None:
-            cls._instance = cls.__new__(cls)
-            cls._instance.initialize()
-        return cls._instance

 def get_transform_buffer(
     shape, dtype, device, to_order, from_order="row", transpose=False
 ):
```
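The deleted `GlobalData` class was a textbook lazy singleton: direct construction raises, and `get_instance()` allocates via `cls.__new__(cls)` exactly once, then calls `initialize()`. Since the commit removes it as unused, evidently nothing ever called it; for illustration, usage would have looked like this (the dictionary key is made up):

```python
store = GlobalData.get_instance()       # first call creates and initializes the instance
store.data["transform_buffers"] = {}    # shared mutable state, hypothetical key
assert GlobalData.get_instance() is store  # later calls return the same object
# GlobalData()  # would raise RuntimeError("Call get_instance() instead")
```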
```diff
@@ -26,7 +26,3 @@ def execute_and_return(command_string: str) -> Tuple[str, str]:
 def print_stderr(s: str) -> None:
     print(s, file=sys.stderr)

-def warn_of_missing_prerequisite(s: str) -> None:
-    print_stderr("WARNING, missing pre-requisite: " + s)
```