OpenDAS / FastFold · Commits

Commit dcf7322c, authored Nov 16, 2022 by zhuww

    modify setup.py

Parent: 930a58ad
Showing 1 changed file with 7 additions and 42 deletions:

setup.py  (+7, -42)
--- a/setup.py
+++ b/setup.py
@@ -3,45 +3,19 @@ import subprocess
 import torch
 from setuptools import setup, find_packages
-from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
+from torch.utils.cpp_extension import BuildExtension, CUDAExtension, ROCM_HOME
 
 # ninja build does not work unless include_dirs are abs path
 this_dir = os.path.dirname(os.path.abspath(__file__))
 
 
-def get_cuda_bare_metal_version(cuda_dir):
-    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
-    output = raw_output.split()
-    release_idx = output.index("release") + 1
-    release = output[release_idx].split(".")
-    bare_metal_major = release[0]
-    bare_metal_minor = release[1][0]
-    return raw_output, bare_metal_major, bare_metal_minor
-
-
-def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
-    raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
-    torch_binary_major = torch.version.cuda.split(".")[0]
-    torch_binary_minor = torch.version.cuda.split(".")[1]
-    print("\nCompiling cuda extensions with")
-    print(raw_output + "from " + cuda_dir + "/bin\n")
-    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
-        raise RuntimeError(
-            "Cuda extensions are being compiled with a version of Cuda that does "
-            + "not match the version used to compile Pytorch binaries. "
-            + "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
-            + "In some cases, a minor-version mismatch will not cause later errors: "
-            + "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
-            "You can try commenting out this check (at your own risk).")
 
 
 def append_nvcc_threads(nvcc_extra_args):
     _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
     if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
         return nvcc_extra_args + ["--threads", "4"]
     return nvcc_extra_args
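The deleted helpers above parsed `nvcc -V` output and compared it against torch.version.cuda. For orientation only, here is a minimal sketch of what a ROCm-side analogue could look like; it assumes hipcc lives under <rocm_dir>/bin and that a ROCm build of PyTorch exposes torch.version.hip. None of this is part of the commit.

# Illustrative sketch only -- not part of this commit.
# Assumes hipcc is installed under <rocm_dir>/bin and PyTorch was built for ROCm.
import os
import subprocess

import torch


def get_rocm_bare_metal_version(rocm_dir):
    # Ask the installed hipcc for its version banner.
    return subprocess.check_output([os.path.join(rocm_dir, "bin", "hipcc"), "--version"],
                                   universal_newlines=True)


def check_rocm_torch_binary(rocm_dir):
    # torch.version.hip is None on CUDA builds of PyTorch and a version string on ROCm builds.
    if torch.version.hip is None:
        raise RuntimeError("This PyTorch binary was not built with ROCm support.")
    print("\nCompiling extensions with")
    print(get_rocm_bare_metal_version(rocm_dir) + "from " + rocm_dir + "/bin\n")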
@@ -57,12 +31,6 @@ if not torch.cuda.is_available():
           'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n'
           'If you wish to cross-compile for a single specific architecture,\n'
           'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
-    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
-        _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
-        if int(bare_metal_major) == 11:
-            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
-        else:
-            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
 
 print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
 TORCH_MAJOR = int(torch.__version__.split('.')[0])
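With the automatic TORCH_CUDA_ARCH_LIST default removed in this hunk, the warning text above is the remaining guidance: export the variable yourself before building. A minimal sketch follows, with placeholder architecture values rather than anything this commit prescribes.

# Illustrative only: pin target compute capabilities by hand, since setup.py
# no longer derives a default from the local CUDA toolkit.
import os

os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.0;7.5;8.0")  # placeholder values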
@@ -82,12 +50,12 @@ ext_modules = []
 # https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
 version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
 
-if CUDA_HOME is None:
+if ROCM_HOME is None:
     raise RuntimeError(
         "Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc."
     )
 else:
-    # check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
+    # check_cuda_torch_binary_vs_bare_metal(ROCM_HOME)
 
     def cuda_ext_helper(name, sources, extra_cuda_flags):
         return CUDAExtension(
@@ -96,11 +64,12 @@ else:
                 os.path.join('fastfold/model/fastnn/kernel/cuda_native/csrc', path) for path in sources],
             include_dirs=[
-                os.path.join(this_dir, 'fastfold/model/fastnn/kernel/cuda_native/csrc/include')
+                os.path.join(this_dir, 'fastfold/model/fastnn/kernel/cuda_native/csrc/include'),
+                os.path.join(this_dir, 'fastfold/model/fastnn/kernel/cuda_native/csrc/'),
             ],
             extra_compile_args={
                 'cxx': ['-O3'] + version_dependent_macros,
-                'nvcc':
+                'hipcc':
                     append_nvcc_threads(['-O3', '--use_fast_math'] + version_dependent_macros + extra_cuda_flags)
             })
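For orientation, a hypothetical call to the cuda_ext_helper defined above; the extension name and source file names are placeholders, and the real invocations sit further down in setup.py outside the hunks shown here.

# Hypothetical example -- the names below are placeholders, not FastFold's actual kernels.
ext_modules.append(
    cuda_ext_helper('example_kernel_cuda',                 # placeholder extension name
                    ['example.cpp', 'example_kernel.cu'],  # placeholder files under csrc/
                    extra_cuda_flags + cc_flag))           # flag lists defined later in setup.py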
@@ -108,10 +77,6 @@ else:
     cc_flag = ['-gencode', 'arch=compute_70,code=sm_70']
 
-    _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
-    if int(bare_metal_major) >= 11:
-        cc_flag.append('-gencode')
-        cc_flag.append('arch=compute_80,code=sm_80')
 
     extra_cuda_flags = ['-std=c++14', '-maxrregcount=50', '-U__CUDA_NO_HALF_OPERATORS__',
@@ -140,5 +105,5 @@ setup(
     ext_modules=ext_modules,
     package_data={'fastfold': ['model/fastnn/kernel/cuda_native/csrc/*']},
     cmdclass={'build_ext': BuildExtension} if ext_modules else {},
-    install_requires=['einops', 'colossalai'],
+    # install_requires=['einops', 'colossalai'],
 )
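With install_requires commented out in the final hunk, installing this revision via pip no longer pulls in the two runtime dependencies it used to declare. A small sketch of checking for them by hand before building; illustrative only, not part of the commit.

# Illustrative only: verify the runtime deps that setup.py no longer declares.
import importlib.util

for dep in ("einops", "colossalai"):
    if importlib.util.find_spec(dep) is None:
        raise RuntimeError(f"'{dep}' is not installed; install it manually, e.g. `pip install {dep}`.")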