Unverified Commit 14398229 authored by Ruilong Li (李瑞龙), committed by GitHub

Workflows to build wheel (#151)

pre-built wheels 
parent e9dd9da6
#!/bin/bash
CUDA_HOME=/c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/CUDA/v11.3
PATH=${CUDA_HOME}/bin:$PATH
PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2017/BuildTools/MSBuild/15.0/Bin:$PATH
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="6.0+PTX"
#!/bin/bash
# TODO We currently use CUDA 11.3 to build CUDA 11.5 Windows wheels
# Install NVIDIA drivers, see:
# https://github.com/pytorch/vision/blob/master/packaging/windows/internal/cuda_install.bat#L99-L102
curl -k -L "https://drive.google.com/u/0/uc?id=1injUyo3lnarMgWyRcXqKg4UGnN0ysmuq&export=download" --output "/tmp/gpu_driver_dlls.zip"
7z x "/tmp/gpu_driver_dlls.zip" -o"/c/Windows/System32"
export CUDA_SHORT=11.3
export CUDA_URL=https://developer.download.nvidia.com/compute/cuda/${CUDA_SHORT}.0/local_installers
export CUDA_FILE=cuda_${CUDA_SHORT}.0_465.89_win10.exe
# Install CUDA:
curl -k -L "${CUDA_URL}/${CUDA_FILE}" --output "${CUDA_FILE}"
echo ""
echo "Installing from ${CUDA_FILE}..."
PowerShell -Command "Start-Process -FilePath \"${CUDA_FILE}\" -ArgumentList \"-s nvcc_${CUDA_SHORT} cuobjdump_${CUDA_SHORT} nvprune_${CUDA_SHORT} cupti_${CUDA_SHORT} cublas_dev_${CUDA_SHORT} cudart_${CUDA_SHORT} cufft_dev_${CUDA_SHORT} curand_dev_${CUDA_SHORT} cusolver_dev_${CUDA_SHORT} cusparse_dev_${CUDA_SHORT} thrust_${CUDA_SHORT} npp_dev_${CUDA_SHORT} nvrtc_dev_${CUDA_SHORT} nvml_dev_${CUDA_SHORT}\" -Wait -NoNewWindow"
echo "Done!"
rm -f "${CUDA_FILE}"
#!/bin/bash
CUDA_HOME=/usr/local/cuda-11.6
LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
PATH=${CUDA_HOME}/bin:${PATH}
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6"
#!/bin/bash
OS=ubuntu1804
wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin
sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget -nv https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installers/cuda-repo-${OS}-11-6-local_11.6.2-510.47.03-1_amd64.deb
sudo dpkg -i cuda-repo-${OS}-11-6-local_11.6.2-510.47.03-1_amd64.deb
sudo apt-key add /var/cuda-repo-${OS}-11-6-local/7fa2af80.pub
sudo apt-get -qq update
sudo apt install -y cuda-nvcc-11-6 cuda-libraries-dev-11-6
sudo apt clean
rm -f cuda-repo-${OS}-11-6-local_11.6.2-510.47.03-1_amd64.deb
#!/bin/bash
CUDA_HOME=/c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/CUDA/v11.3
PATH=${CUDA_HOME}/bin:$PATH
PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2017/BuildTools/MSBuild/15.0/Bin:$PATH
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="6.0+PTX"
#!/bin/bash
# TODO We currently use CUDA 11.3 to build CUDA 11.6 Windows wheels
# Install NVIDIA drivers, see:
# https://github.com/pytorch/vision/blob/master/packaging/windows/internal/cuda_install.bat#L99-L102
curl -k -L "https://drive.google.com/u/0/uc?id=1injUyo3lnarMgWyRcXqKg4UGnN0ysmuq&export=download" --output "/tmp/gpu_driver_dlls.zip"
7z x "/tmp/gpu_driver_dlls.zip" -o"/c/Windows/System32"
export CUDA_SHORT=11.3
export CUDA_URL=https://developer.download.nvidia.com/compute/cuda/${CUDA_SHORT}.0/local_installers
export CUDA_FILE=cuda_${CUDA_SHORT}.0_465.89_win10.exe
# Install CUDA:
curl -k -L "${CUDA_URL}/${CUDA_FILE}" --output "${CUDA_FILE}"
echo ""
echo "Installing from ${CUDA_FILE}..."
PowerShell -Command "Start-Process -FilePath \"${CUDA_FILE}\" -ArgumentList \"-s nvcc_${CUDA_SHORT} cuobjdump_${CUDA_SHORT} nvprune_${CUDA_SHORT} cupti_${CUDA_SHORT} cublas_dev_${CUDA_SHORT} cudart_${CUDA_SHORT} cufft_dev_${CUDA_SHORT} curand_dev_${CUDA_SHORT} cusolver_dev_${CUDA_SHORT} cusparse_dev_${CUDA_SHORT} thrust_${CUDA_SHORT} npp_dev_${CUDA_SHORT} nvrtc_dev_${CUDA_SHORT} nvml_dev_${CUDA_SHORT}\" -Wait -NoNewWindow"
echo "Done!"
rm -f "${CUDA_FILE}"
#!/bin/bash
CUDA_HOME=/usr/local/cuda-11.7
LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
PATH=${CUDA_HOME}/bin:${PATH}
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6"
#!/bin/bash
OS=ubuntu1804
wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin
sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget -nv https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb
sudo dpkg -i cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb
sudo cp /var/cuda-repo-${OS}-11-7-local/cuda-*-keyring.gpg /usr/share/keyrings/
sudo apt-get -qq update
sudo apt install -y cuda-nvcc-11-7 cuda-libraries-dev-11-7
sudo apt clean
rm -f cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb
#!/bin/bash
CUDA_HOME=/c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/CUDA/v11.3
PATH=${CUDA_HOME}/bin:$PATH
PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2017/BuildTools/MSBuild/15.0/Bin:$PATH
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="6.0+PTX"
#!/bin/bash
# TODO We currently use CUDA 11.3 to build CUDA 11.7 Windows wheels
# Install NVIDIA drivers, see:
# https://github.com/pytorch/vision/blob/master/packaging/windows/internal/cuda_install.bat#L99-L102
curl -k -L "https://drive.google.com/u/0/uc?id=1injUyo3lnarMgWyRcXqKg4UGnN0ysmuq&export=download" --output "/tmp/gpu_driver_dlls.zip"
7z x "/tmp/gpu_driver_dlls.zip" -o"/c/Windows/System32"
export CUDA_SHORT=11.3
export CUDA_URL=https://developer.download.nvidia.com/compute/cuda/${CUDA_SHORT}.0/local_installers
export CUDA_FILE=cuda_${CUDA_SHORT}.0_465.89_win10.exe
# Install CUDA:
curl -k -L "${CUDA_URL}/${CUDA_FILE}" --output "${CUDA_FILE}"
echo ""
echo "Installing from ${CUDA_FILE}..."
PowerShell -Command "Start-Process -FilePath \"${CUDA_FILE}\" -ArgumentList \"-s nvcc_${CUDA_SHORT} cuobjdump_${CUDA_SHORT} nvprune_${CUDA_SHORT} cupti_${CUDA_SHORT} cublas_dev_${CUDA_SHORT} cudart_${CUDA_SHORT} cufft_dev_${CUDA_SHORT} curand_dev_${CUDA_SHORT} cusolver_dev_${CUDA_SHORT} cusparse_dev_${CUDA_SHORT} thrust_${CUDA_SHORT} npp_dev_${CUDA_SHORT} nvrtc_dev_${CUDA_SHORT} nvml_dev_${CUDA_SHORT}\" -Wait -NoNewWindow"
echo "Done!"
rm -f "${CUDA_FILE}"
@@ -6,6 +6,7 @@ name: Upload Python Package
 on:
   release:
     types: [created]
+    branches: [master]

 jobs:
   deploy:
@@ -28,5 +29,5 @@ jobs:
       env:
         PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
       run: |
-        python -m build
+        BUILD_NO_CUDA=1 python -m build
         twine upload --username __token__ --password $PYPI_TOKEN dist/*
\ No newline at end of file
# cmake_minimum_required(VERSION 3.3)
# project(nerfacc LANGUAGES CXX CUDA)
# find_package(pybind11 REQUIRED)
# find_package(Torch REQUIRED)
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
# set(SOURCE_DIR nerfacc/cuda/csrc)
# set(INCLUDE_DIR nerfacc/cuda/csrc/include)
# file(GLOB SOURCES ${SOURCE_DIR}/*.cu)
# pybind11_add_module(${PROJECT_NAME} SHARED ${SOURCES})
# target_link_libraries(${PROJECT_NAME} PRIVATE "${TORCH_LIBRARIES}")
# target_include_directories(${PROJECT_NAME} PRIVATE "${INCLUDE_DIR}")
# # message(STATUS "CUDA enabled")
# # set( CMAKE_CUDA_STANDARD 14 )
# # set( CMAKE_CUDA_STANDARD_REQUIRED ON)
# # find_package(pybind11 REQUIRED)
# # # find_package(Python3 REQUIRED COMPONENTS Development)
# # # target_link_libraries(${PROJECT_NAME} PRIVATE Python3::Python)
# # find_package(Torch REQUIRED)
# # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
# # target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES})
# # set(CSRC nerfacc/cuda/csrc)
# # file(GLOB_RECURSE ALL_SOURCES ${ALL_SOURCES} ${CSRC}/*.cu)
# # file(GLOB_RECURSE ALL_HEADERS ${CSRC}/include/*.h)
# # add_library(${PROJECT_NAME} SHARED ${ALL_SOURCES})
# # target_include_directories(${PROJECT_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
# # set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0")
# # message("-- CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
# # message("-- CMAKE_CXX_FLAGS_DEBUG: ${CMAKE_CXX_FLAGS_DEBUG}")
# # message("-- CMAKE_CXX_FLAGS_RELEASE: ${CMAKE_CXX_FLAGS_RELEASE}")
# # set_target_properties(${PROJECT_NAME} PROPERTIES
# # EXPORT_NAME nerfacc
# # INSTALL_RPATH ${TORCH_INSTALL_PREFIX}/lib)
# # Cmake creates *.dylib by default, but python expects *.so by default
# # if (APPLE)
# # set_property(TARGET ${PROJECT_NAME} PROPERTY SUFFIX .so)
# # endif()
\ No newline at end of file
@@ -32,10 +32,27 @@ Using NerfAcc,

## Installation

The easiest way is to install from PyPI; the package will build itself on the first run.
```
pip install nerfacc
```
We also provide pre-built wheels covering the major PyTorch + CUDA combinations supported by [official PyTorch](https://pytorch.org/get-started/previous-versions/).
```
# e.g., Windows & Linux, torch 1.13.0 + cu117
pip install nerfacc -f https://nerfacc-bucket.s3.us-west-2.amazonaws.com/whl/torch-1.13.0_cu117.html
```
| | `cu102` | `cu113` | `cu116` | `cu117` |
|--------------|---------|---------|---------|---------|
| torch 1.10.0 | ✅ | ✅ | | |
| torch 1.11.0 | ✅* | ✅ | | |
| torch 1.12.0 | ✅* | ✅ | ✅ | |
| torch 1.13.0 | | | ✅ | ✅ |
\* PyTorch does not provide Windows pre-built wheels for these combinations, so we do not provide them either.
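
For the other combinations marked above, the wheel index is assumed to follow the same `torch-<version>_cu<xxx>.html` naming pattern as the example above (an assumption, not confirmed by this commit), e.g.:

```
# e.g., torch 1.12.0 + cu116 (URL pattern assumed from the example above)
pip install nerfacc -f https://nerfacc-bucket.s3.us-west-2.amazonaws.com/whl/torch-1.12.0_cu116.html
```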
## Usage

The idea of NerfAcc is to perform efficient ray marching and volumetric rendering. So NerfAcc can work with any user-defined radiance field. To plug the NerfAcc rendering pipeline into your code and enjoy the acceleration, you only need to define two functions with your radiance field.
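
For concreteness, here is a minimal sketch of those two functions and how they feed the pipeline. It loosely follows the `nerfacc.ray_marching` / `nerfacc.rendering` API of the 0.3.x releases; the dummy radiance field and the exact keyword arguments are illustrative placeholders, so check the documentation for the signatures matching your installed version.

```python
import torch
import nerfacc

# Dummy stand-in for your own radiance field (replace with your model).
class DummyRadianceField(torch.nn.Module):
    def query_density(self, x):
        return torch.ones_like(x[..., :1])                      # sigma: (n_samples, 1)
    def forward(self, x):
        return torch.sigmoid(x), torch.ones_like(x[..., :1])    # rgb, sigma

radiance_field = DummyRadianceField().cuda()
rays_o = torch.rand(128, 3, device="cuda")                       # ray origins
rays_d = torch.nn.functional.normalize(torch.randn(128, 3, device="cuda"), dim=-1)

def sigma_fn(t_starts, t_ends, ray_indices):
    """Density at each sample interval's midpoint (lets NerfAcc skip empty space)."""
    positions = rays_o[ray_indices] + rays_d[ray_indices] * (t_starts + t_ends) / 2.0
    return radiance_field.query_density(positions)

def rgb_sigma_fn(t_starts, t_ends, ray_indices):
    """Color and density for the samples that survive ray marching."""
    positions = rays_o[ray_indices] + rays_d[ray_indices] * (t_starts + t_ends) / 2.0
    return radiance_field(positions)

# Efficient sampling, then differentiable volumetric rendering.
ray_indices, t_starts, t_ends = nerfacc.ray_marching(
    rays_o, rays_d, sigma_fn=sigma_fn, near_plane=0.2, far_plane=1.0
)
color, opacity, depth = nerfacc.rendering(
    t_starts, t_ends, ray_indices, n_rays=rays_o.shape[0], rgb_sigma_fn=rgb_sigma_fn
)
```

In a real training loop you would also maintain NerfAcc's occupancy grid so the density function is only queried where it matters; the documentation walks through that setup.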
@@ -144,6 +161,13 @@ Used by:
- [instant-nsr-pl](https://github.com/bennyguo/instant-nsr-pl): NeuS in 10 minutes.
## Build Status
#### Linux & Windows
## Citation

```bibtex
...
 import pytorch_sphinx_theme
-from nerfacc import __version__
+__version__ = None
+exec(open("../../nerfacc/version.py", "r").read())

 # -- Project information
...
@@ -6,11 +6,8 @@ import glob
 import json
 import os
 import shutil
-import urllib.request
-import zipfile
 from subprocess import DEVNULL, call

-from packaging import version
 from rich.console import Console
 from torch.utils.cpp_extension import _get_build_directory, load
@@ -47,44 +44,17 @@ extra_cflags = ["-O3"]
 extra_cuda_cflags = ["-O3"]

 _C = None
-if cuda_toolkit_available():
-    # # we need cub >= 1.15.0 which is shipped with cuda >= 11.6, so download if
-    # # necessary. (compling does not garentee to success)
-    # if version.parse(cuda_toolkit_version()) < version.parse("11.6"):
-    #     target_path = os.path.join(build_dir, "cub-1.17.0")
-    #     if not os.path.exists(target_path):
-    #         zip_path, _ = urllib.request.urlretrieve(
-    #             "https://github.com/NVIDIA/cub/archive/1.17.0.tar.gz",
-    #             os.path.join(build_dir, "cub-1.17.0.tar.gz"),
-    #         )
-    #         shutil.unpack_archive(zip_path, build_dir)
-    #     extra_include_paths.append(target_path)
-    #     extra_cuda_cflags.append("-DTHRUST_IGNORE_CUB_VERSION_CHECK")
-    #     print(
-    #         f"download cub because the cuda version is {cuda_toolkit_version()}"
-    #     )
-    if os.listdir(build_dir) != []:
-        # If the build exists, we assume the extension has been built
-        # and we can load it.
-        Console().print(
-            "[yellow]NerfAcc: CUDA set up, loading (should be quick)[/yellow]"
-        )
-        _C = load(
-            name=name,
-            sources=glob.glob(os.path.join(PATH, "csrc/*.cu")),
-            extra_cflags=extra_cflags,
-            extra_cuda_cflags=extra_cuda_cflags,
-            extra_include_paths=extra_include_paths,
-        )
-    else:
-        # Build from scratch. Remove the build directory just to be safe: pytorch jit might stuck
-        # if the build directory exists.
-        shutil.rmtree(build_dir)
-        with Console().status(
-            "[bold yellow]NerfAcc: Setting up CUDA (This may take a few minutes the first time)",
-            spinner="bouncingBall",
-        ):
+try:
+    # try to import the compiled module (via setup.py)
+    from nerfacc import csrc as _C
+except ImportError:
+    # if failed, try with JIT compilation
+    if cuda_toolkit_available():
+        if os.listdir(build_dir) != []:
+            # If the build exists, we assume the extension has been built
+            # and we can load it.
             _C = load(
                 name=name,
                 sources=glob.glob(os.path.join(PATH, "csrc/*.cu")),
@@ -92,10 +62,25 @@ if cuda_toolkit_available():
                 extra_cuda_cflags=extra_cuda_cflags,
                 extra_include_paths=extra_include_paths,
             )
-else:
-    Console().print(
-        "[yellow]NerfAcc: No CUDA toolkit found. NerfAcc will be disabled.[/yellow]"
-    )
+        else:
+            # Build from scratch. Remove the build directory just to be safe: pytorch jit might stuck
+            # if the build directory exists.
+            shutil.rmtree(build_dir)
+            with Console().status(
+                "[bold yellow]NerfAcc: Setting up CUDA (This may take a few minutes the first time)",
+                spinner="bouncingBall",
+            ):
+                _C = load(
+                    name=name,
+                    sources=glob.glob(os.path.join(PATH, "csrc/*.cu")),
+                    extra_cflags=extra_cflags,
+                    extra_cuda_cflags=extra_cuda_cflags,
+                    extra_include_paths=extra_include_paths,
+                )
+    else:
+        Console().print(
+            "[yellow]NerfAcc: No CUDA toolkit found. NerfAcc will be disabled.[/yellow]"
        )

 __all__ = ["_C"]
""" """
Copyright (c) 2022 Ruilong Li, UC Berkeley. Copyright (c) 2022 Ruilong Li, UC Berkeley.
""" """
try:
from importlib.metadata import version
except ImportError:
# Running on pre-3.8 Python; use importlib-metadata package
from importlib_metadata import version
__version__ = version("nerfacc") __version__ = "0.3.3"
[build-system]
requires = ["setuptools==65.5.0"]
build-backend = "setuptools.build_meta"
[project]
name = "nerfacc"
version = "0.3.3"
description = "A General NeRF Acceleration Toolbox."
readme = "README.md"
authors = [{name = "Ruilong", email = "ruilongli94@gmail.com"}]
license = { text="MIT" }
requires-python = ">=3.7"
dependencies = [
"importlib_metadata>=5.0.0; python_version<'3.8'",
"ninja>=1.10.2.3",
"pybind11>=2.10.0",
"torch", # tested with 1.12.0
"rich>=12"
]
# [options]
# equivalent to using --extra-index-url with pip,
# which is needed for specifying the CUDA version for torch
# dependency_links = [
# "https://download.pytorch.org/whl/cu116"
# ]
[tool.setuptools.package-data]
"*" = ["*.cu", "*.cpp", "*.h"]
[project.urls]
homepage = "https://www.nerfacc.com/en/latest/"
documentation = "https://www.nerfacc.com/en/latest/"
repository = "https://github.com/KAIR-BAIR/nerfacc/"
[project.optional-dependencies]
# Development packages
dev = [
"black[jupyter]==22.3.0",
"isort==5.10.1",
"pylint==2.13.4",
"pytest==7.1.2",
"pytest-xdist==2.5.0",
"typeguard>=2.13.3",
"pyyaml==6.0",
"build",
"twine",
]
# [tool.setuptools.packages.find]
# include = ["nerfacc", "scripts"]
[tool.black]
line-length = 80
[tool.isort]
multi_line_output = 3
line_length = 80
include_trailing_comma = true
[tool.pylint.messages_control]
max-line-length = 80
generated-members = ["numpy.*", "torch.*", "cv2.*", "cv.*"]
good-names-rgxs = "^[_a-zA-Z][_a-z0-9]?$"
ignore-paths = ["^tests/.*$", "examples/pycolmap"]
jobs = 0
disable = [
"duplicate-code",
"fixme",
"logging-fstring-interpolation",
"too-many-arguments",
"too-many-branches",
"too-many-instance-attributes",
"too-many-locals",
"unnecessary-ellipsis",
]
[tool.pytest.ini_options]
addopts = "-n=4 --typeguard-packages=nerfacc --disable-warnings"
testpaths = [
"tests",
]
[tool.pyright]
include = ["nerfacc"]
exclude = ["**/node_modules",
"**/__pycache__",
]
ignore = ["nerfacc/cuda"]
defineConstant = { DEBUG = true }
pythonVersion = "3.9"
pythonPlatform = "Linux"
import argparse
import os
from boto3 import client
parser = argparse.ArgumentParser()
parser.add_argument("--access_key_id", type=str, required=True)
parser.add_argument("--secret_access_key", type=str, required=True)
parser.add_argument("--bucket", type=str, required=True)
parser.add_argument("--region", type=str, required=True)
args = parser.parse_args()
URL = f"https://{args.bucket}.s3.{args.region}.amazonaws.com/"
s3 = client(
"s3",
aws_access_key_id=args.access_key_id,
aws_secret_access_key=args.secret_access_key,
)
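# Discover the per-build subdirectories under "whl/" (e.g. "whl/torch-1.13.0_cu117/"),
# each of which holds the wheels for one torch + CUDA combination.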
responses = s3.list_objects_v2(Bucket=args.bucket, Prefix="whl/")["Contents"]
subdirectories = {}
for data in responses:
splits = data["Key"].split("/")
if len(splits) == 3:
subdirectories[splits[1]] = []
for dir in subdirectories.keys():
responses = s3.list_objects_v2(Bucket=args.bucket, Prefix=f"whl/{dir}")[
"Contents"
]
for data in responses:
splits = data["Key"].split("/")
if len(splits) == 3:
subdirectories[dir].append(splits[2])
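# For each subdirectory, write a pip-compatible HTML index (one link per wheel)
# and upload it back to the bucket as whl/<dir>.html.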
for dir, files in subdirectories.items():
lines = ""
for file in files:
href = os.path.join(URL, "whl", dir, file)
lines += f"<a href='{href}'>{file}</a>\n<br>\n"
html = f"<html>\n<head></head>\n<body>\n{lines}\n</body>\n</html>\n"
html_file = f"/tmp/{dir}.html"
with open(html_file, "w") as f:
f.write(html)
s3.upload_file(
html_file,
args.bucket,
f"whl/{dir}.html",
ExtraArgs={"ContentType": "text/html"},
)
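# Usage sketch (the script path and environment variables below are placeholders;
# adapt to however this file is invoked in CI):
#   python generate_whl_index.py \
#       --access_key_id "$AWS_ACCESS_KEY_ID" \
#       --secret_access_key "$AWS_SECRET_ACCESS_KEY" \
#       --bucket nerfacc-bucket \
#       --region us-west-2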
[isort]
multi_line_output = 3
line_length = 80
include_trailing_comma = true
skip=./examples/pycolmap
\ No newline at end of file
import glob
import os
import os.path as osp
import platform
import sys
from setuptools import find_packages, setup
__version__ = None
exec(open("nerfacc/version.py", "r").read())
URL = "https://github.com/KAIR-BAIR/nerfacc"
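# Build switches (read from the environment):
#   BUILD_NO_CUDA=1 -> skip compiling the CUDA extension (used by the publish
#                      workflow, which runs `BUILD_NO_CUDA=1 python -m build`)
#   WITH_SYMBOLS=1  -> keep symbols in the built extension (otherwise linked with -s)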
BUILD_NO_CUDA = os.getenv("BUILD_NO_CUDA", "0") == "1"
WITH_SYMBOLS = os.getenv("WITH_SYMBOLS", "0") == "1"
def get_ext():
from torch.utils.cpp_extension import BuildExtension
return BuildExtension.with_options(
no_python_abi_suffix=True, use_ninja=False
)
def get_extensions():
import torch
from torch.__config__ import parallel_info
from torch.utils.cpp_extension import CUDAExtension
extensions_dir = osp.join("nerfacc", "cuda", "csrc")
sources = glob.glob(osp.join(extensions_dir, "*.cu"))
# remove generated 'hip' files, in case of rebuilds
sources = [path for path in sources if "hip" not in path]
undef_macros = []
define_macros = []
if sys.platform == "win32":
define_macros += [("nerfacc_EXPORTS", None)]
extra_compile_args = {"cxx": ["-O3"]}
if not os.name == "nt": # Not on Windows:
extra_compile_args["cxx"] += ["-Wno-sign-compare"]
extra_link_args = [] if WITH_SYMBOLS else ["-s"]
info = parallel_info()
if (
"backend: OpenMP" in info
and "OpenMP not found" not in info
and sys.platform != "darwin"
):
extra_compile_args["cxx"] += ["-DAT_PARALLEL_OPENMP"]
if sys.platform == "win32":
extra_compile_args["cxx"] += ["/openmp"]
else:
extra_compile_args["cxx"] += ["-fopenmp"]
else:
print("Compiling without OpenMP...")
# Compile for mac arm64
if sys.platform == "darwin" and platform.machine() == "arm64":
extra_compile_args["cxx"] += ["-arch", "arm64"]
extra_link_args += ["-arch", "arm64"]
nvcc_flags = os.getenv("NVCC_FLAGS", "")
nvcc_flags = [] if nvcc_flags == "" else nvcc_flags.split(" ")
nvcc_flags += ["-O3"]
if torch.version.hip:
# USE_ROCM was added to later versions of PyTorch.
# Define here to support older PyTorch versions as well:
define_macros += [("USE_ROCM", None)]
undef_macros += ["__HIP_NO_HALF_CONVERSIONS__"]
else:
nvcc_flags += ["--expt-relaxed-constexpr"]
extra_compile_args["nvcc"] = nvcc_flags
extension = CUDAExtension(
f"nerfacc.csrc",
sources,
include_dirs=[osp.join(extensions_dir, "include")],
define_macros=define_macros,
undef_macros=undef_macros,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
return [extension]
# work-around hipify abs paths
include_package_data = True
# if torch.cuda.is_available() and torch.version.hip:
# include_package_data = False
setup(
name="nerfacc",
version=__version__,
description="A General NeRF Acceleration Toolbox",
author="Ruilong",
author_email="ruilongli94@gmail.com",
url=URL,
download_url=f"{URL}/archive/{__version__}.tar.gz",
keywords=[],
python_requires=">=3.7",
install_requires=["rich>=12", "torch"],
extras_require={
# dev dependencies. Install them by `pip install nerfacc[dev]`
"dev": [
"black[jupyter]==22.3.0",
"isort==5.10.1",
"pylint==2.13.4",
"pytest==7.1.2",
"pytest-xdist==2.5.0",
"typeguard>=2.13.3",
"pyyaml==6.0",
"build",
"twine",
],
},
ext_modules=get_extensions() if not BUILD_NO_CUDA else [],
cmdclass={"build_ext": get_ext()} if not BUILD_NO_CUDA else {},
packages=find_packages(),
include_package_data=include_package_data,
)