Commit f7e64e20 authored by xiabo

Append the earlier modifications to mmcv

parent d409eedc
@@ -27,7 +27,7 @@
 namespace tv {
-#ifdef __NVCC__
+#ifdef __CUDACC__
 #define TV_HOST_DEVICE_INLINE __forceinline__ __device__ __host__
 #define TV_DEVICE_INLINE __forceinline__ __device__
 #define TV_HOST_DEVICE __device__ __host__
......
 #include <cuda_runtime_api.h>
 #include <torch/script.h>
+#include "../spconv_utils.h"
 #include <utils/spconv/spconv/indice.h>
 #include <utils/spconv/spconv/reordering.h>
-#include "../spconv_utils.h"
+//#include "../spconv_utils.h"
 #include "pytorch_cuda_helper.hpp"
 torch::Tensor FusedIndiceConvBatchnormCUDAKernelLauncher(
......
@@ -13,6 +13,7 @@
 // limitations under the License.
 #include <ATen/ATen.h>
+#include "../spconv_utils.h"
 #include <utils/spconv/spconv/indice.h>
 #include <utils/spconv/spconv/mp_helper.h>
 #include <utils/spconv/tensorview/helper_launch.h>
@@ -23,7 +24,7 @@
 #include <spconv/indice.cuh>
 #include <type_traits>
-#include "../spconv_utils.h"
+//#include "../spconv_utils.h"
 #include "pytorch_cuda_helper.hpp"
 namespace functor {
......
@@ -13,6 +13,7 @@
 // limitations under the License.
 #include <ATen/ATen.h>
+#include "../spconv_utils.h"
 #include <utils/spconv/spconv/maxpool.h>
 #include <utils/spconv/spconv/mp_helper.h>
 #include <utils/spconv/tensorview/helper_launch.h>
@@ -23,7 +24,7 @@
 #include <type_traits>
 #include <utils/spconv/tensorview/helper_kernel.cuh>
-#include "../spconv_utils.h"
+//#include "../spconv_utils.h"
 #include "pytorch_cuda_helper.hpp"
 template <typename scalar_t, typename Index, int NumTLP, int NumILP>
......
 #include <cuda_runtime_api.h>
 #include <torch/script.h>
+#include "../spconv_utils.h"
 #include <utils/spconv/spconv/maxpool.h>
-#include "../spconv_utils.h"
+//#include "../spconv_utils.h"
 #include "pytorch_cuda_helper.hpp"
 torch::Tensor IndiceMaxpoolForwardCUDAKernelLauncher(torch::Tensor features,
......
@@ -13,6 +13,7 @@
 // limitations under the License.
 #include <ATen/ATen.h>
+#include "../spconv_utils.h"
 #include <utils/spconv/spconv/mp_helper.h>
 #include <utils/spconv/spconv/reordering.h>
 #include <utils/spconv/tensorview/helper_launch.h>
@@ -24,7 +25,7 @@
 #include <type_traits>
 #include <utils/spconv/tensorview/helper_kernel.cuh>
-#include "../spconv_utils.h"
+//#include "../spconv_utils.h"
 #include "pytorch_cuda_helper.hpp"
 namespace functor {
......
 #include <cuda_runtime_api.h>
 #include <torch/script.h>
+#include "../spconv_utils.h"
 #include <utils/spconv/spconv/indice.h>
 #include <utils/spconv/spconv/reordering.h>
-#include "../spconv_utils.h"
+//#include "../spconv_utils.h"
 #include "pytorch_cuda_helper.hpp"
 template <unsigned NDim>
......
@@ -5,6 +5,9 @@ import re
 import warnings
 from pkg_resources import DistributionNotFound, get_distribution
 from setuptools import find_packages, setup
+import subprocess
+from typing import Optional, Union
+from pathlib import Path
 EXT_TYPE = ''
 try:
@@ -24,6 +27,29 @@ except ModuleNotFoundError:
     cmd_class = {}
     print('Skip building ext ops due to the absence of torch.')
+def get_sha(pytorch_root: Union[str, Path]) -> str:
+    try:
+        return subprocess.check_output(
+            ['git', 'rev-parse', 'HEAD'],
+            cwd=pytorch_root).decode('ascii').strip()
+    except Exception:
+        return 'Unknown'
+
+
+def get_version_add(sha: Optional[str] = None) -> None:
+    mmcv_root = Path(__file__).parent.parent
+    add_version_path = mmcv_root / "mmcv" / "version.py"
+    if sha != 'Unknown':
+        if sha is None:
+            sha = get_sha(mmcv_root)
+        version = 'git' + sha[:7]
+        if os.getenv('MMCV_BUILD_VERSION'):
+            version_dtk = os.getenv('MMCV_BUILD_VERSION', "")
+            version += "." + version_dtk
+        with open(add_version_path, encoding="utf-8", mode="a") as file:
+            file.write("__version__=__version__+'+{}'\n".format(version))
 def choose_requirement(primary, secondary):
     """If some version of primary requirement installed, return primary, else
@@ -38,6 +64,7 @@ def choose_requirement(primary, secondary):
 def get_version():
+    get_version_add()
     version_file = 'mmcv/version.py'
     with open(version_file, encoding='utf-8') as f:
         exec(compile(f.read(), version_file, 'exec'))
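For reference, a minimal sketch (outside the diff, with assumed example values) of the suffix string that get_version_add() appends to mmcv/version.py and that get_version() then picks up through the exec above:

    # Assumed inputs for illustration only: a git HEAD starting with f7e64e20
    # and MMCV_BUILD_VERSION=dtk2304 exported in the build environment.
    import os

    sha = 'f7e64e20'
    version = 'git' + sha[:7]              # -> 'gitf7e64e2'
    if os.getenv('MMCV_BUILD_VERSION'):    # e.g. 'dtk2304' (assumed)
        version += '.' + os.getenv('MMCV_BUILD_VERSION', '')

    # This is the line get_version_add() appends to mmcv/version.py:
    print("__version__=__version__+'+{}'".format(version))
    # -> __version__=__version__+'+gitf7e64e2.dtk2304'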
@@ -290,6 +317,7 @@ def get_extensions():
                 glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp')
             extension = CUDAExtension
             include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
+            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/pytorch'))
             include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
         elif (hasattr(torch, 'is_mlu_available') and
               torch.is_mlu_available()) or \
@@ -419,7 +447,34 @@ setup(
     description='OpenMMLab Computer Vision Foundation',
     keywords='computer vision',
     packages=find_packages(),
-    include_package_data=True,
+    # include_package_data=True,
+    include_package_data=False,
+    package_data={
+        'mmcv': [
+            'model_zoo/*.json',
+            'ops/csrc/common/cuda/*.cuh',
+            'ops/csrc/common/hip/*.cuh',
+            'ops/csrc/common/cuda/*.hpp',
+            'ops/csrc/common/hip/*.hpp',
+            'ops/csrc/common/*.hpp',
+            'ops/csrc/pytorch/*.cpp',
+            'ops/csrc/pytorch/cuda/*.cu',
+            'ops/csrc/pytorch/hip/*.hip',
+            'ops/csrc/pytorch/cuda/*.cpp',
+            'ops/csrc/pytorch/hip/*.cpp',
+            'ops/csrc/pytorch/cpu/*.cpp',
+            'ops/csrc/parrots/*.h',
+            'ops/csrc/parrots/*.cpp',
+            'ops/csrc/common/cuda/spconv/*.cuh',
+            'ops/csrc/common/hip/spconv/*.cuh',
+            'ops/csrc/common/utils/spconv/*.h',
+            'ops/csrc/common/utils/spconv/spconv/*.h',
+            'ops/csrc/common/utils/spconv/tensorview/*.h',
+            'ops/csrc/pytorch/*.h',
+        ]
+    },
     classifiers=[
         'Development Status :: 4 - Beta',
         'License :: OSI Approved :: Apache Software License',
......
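With include_package_data switched off, only the files matched by the package_data globs above ship in the wheel/sdist. A quick hypothetical sanity check (not part of the commit) that the patterns resolve to real files when run from the repository root:

    # Hypothetical helper, for illustration only: count the files each
    # package_data pattern matches under the 'mmcv' package directory.
    from pathlib import Path

    patterns = [
        'model_zoo/*.json',
        'ops/csrc/common/cuda/*.cuh',
        'ops/csrc/pytorch/cuda/*.cu',
    ]  # a subset of the globs above

    pkg_dir = Path('mmcv')
    for pattern in patterns:
        matches = sorted(pkg_dir.glob(pattern))
        print('{}: {} file(s)'.format(pattern, len(matches)))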