import glob
import os
import platform
import re
import subprocess
from pathlib import Path
from typing import Optional, Union

from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup

EXT_TYPE = ''
try:
    import torch
    if torch.__version__ == 'parrots':
        from parrots.utils.build_extension import BuildExtension
        EXT_TYPE = 'parrots'
    elif (hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()) or \
            os.getenv('FORCE_MLU', '0') == '1':
        from torch_mlu.utils.cpp_extension import BuildExtension
        EXT_TYPE = 'pytorch'
    else:
        from torch.utils.cpp_extension import BuildExtension
        EXT_TYPE = 'pytorch'
    cmd_class = {'build_ext': BuildExtension}
except ModuleNotFoundError:
    cmd_class = {}
    print('Skip building ext ops due to the absence of torch.')


def choose_requirement(primary, secondary):
    """If some version of the primary requirement is installed, return primary;
    otherwise return secondary."""
    try:
        name = re.split(r'[!<>=]', primary)[0]
        get_distribution(name)
    except DistributionNotFound:
        return secondary

    return str(primary)


def get_sha(pytorch_root: Union[str, Path]) -> str:
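    """Return the current git commit SHA of the checkout at ``pytorch_root``,
    or 'Unknown' if it cannot be determined (e.g. not a git repository)."""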
    try:
        return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=pytorch_root).decode('ascii').strip()
    except Exception:
        return 'Unknown'


def get_abi():
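    """Return the compiler's default ``_GLIBCXX_USE_CXX11_ABI`` setting as a
    tag such as 'abi0' or 'abi1', or 'abiUnknown' if it cannot be queried."""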
    try:
        command = ("echo '#include <string>' | gcc -x c++ -E -dM - "
                   "| fgrep _GLIBCXX_USE_CXX11_ABI")
        result = subprocess.run(
            command, shell=True, capture_output=True, text=True)
        output = result.stdout.strip()
        if not output:
            return 'abiUnknown'
        return 'abi' + output.split(' ')[-1]
    except Exception:
        return 'abiUnknown'


def get_version_add(sha: Optional[str] = None) -> None:
    """Write a local ``__dcu_version__`` tag (git SHA, C++ ABI flag, DTK/ROCm
    version and torch version) into mmcv/version.py."""
    version = ''
    mmcv_root = os.path.dirname(os.path.abspath(__file__))
    add_version_path = os.path.join(mmcv_root, 'mmcv', 'version.py')
    if sha != 'Unknown':
        if sha is None:
            sha = get_sha(mmcv_root)
        version = 'git' + sha[:7]

    # abi
    version += '.' + get_abi()

    # dtk version
    if os.getenv('ROCM_PATH'):
        rocm_path = os.getenv('ROCM_PATH', '')
        rocm_version_path = os.path.join(rocm_path, '.info', 'rocm_version')
        with open(rocm_version_path, encoding='utf-8') as file:
            lines = file.readlines()
        rocm_version = lines[0][:-2].replace('.', '')
        version += '.dtk' + rocm_version

    # torch version
    version += '.torch' + torch.__version__[:4]

    with open(add_version_path, encoding='utf-8') as file:
        lines = file.readlines()
    # the third line of mmcv/version.py is expected to hold __dcu_version__
    lines[2] = "__dcu_version__ = '2.0.0+{}'\n".format(version)
    with open(add_version_path, mode='w', encoding='utf-8') as file:
        file.writelines(lines)


def get_version():
    get_version_add()
    version_file = 'mmcv/version.py'
    with open(version_file, encoding='utf-8') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__dcu_version__']
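
# Illustrative note (format derived from the helpers above): the generated
# local version string has the shape
#   2.0.0+git<sha7>.abi<0|1>[.dtk<digits>].torch<first 4 chars of torch version>
# e.g. a non-ROCm build at commit 1a2b3c4 against torch 2.1.x would yield
# something like '2.0.0+git1a2b3c4.abi1.torch2.1.'.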


def parse_requirements(fname='requirements/runtime.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strip
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True, include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]

                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info
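
    # For illustration, a stripped requirement line such as
    #   numpy>=1.20;platform_system=="Linux"
    # is parsed by parse_line() into
    #   {'line': 'numpy>=1.20;platform_system=="Linux"', 'package': 'numpy',
    #    'version': ('>=', '1.20'), 'platform_deps': 'platform_system=="Linux"'}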

    def parse_require_file(fpath):
        with open(fpath) as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    yield from parse_line(line)

    def gen_packages_items():
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages


install_requires = parse_requirements()

try:
    # OpenCV installed via conda.
    import cv2  # NOQA: F401
    major, minor, *rest = cv2.__version__.split('.')
    if int(major) < 3:
        raise RuntimeError(
            f'OpenCV >=3 is required but {cv2.__version__} is installed')
except ImportError:
    # If opencv-python-headless is not already installed, fall back to
    # requiring opencv-python instead
    CHOOSE_INSTALL_REQUIRES = [('opencv-python-headless>=3',
                                'opencv-python>=3')]
    for main, secondary in CHOOSE_INSTALL_REQUIRES:
        install_requires.append(choose_requirement(main, secondary))


def get_extensions():
    extensions = []
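
    # Overview (derived from the branches below): which extension build is
    # used depends on environment variables and the detected backend:
    #   MMCV_WITH_OPS=0    -> skip building the mmcv._ext extension entirely
    #   MMCV_WITH_DIOPI=1  -> CPU + DIPU/DIOPI build
    #   CUDA/ROCm available or FORCE_CUDA=1 -> CUDA/HIP build
    #   MLU available or FORCE_MLU=1        -> Cambricon MLU build
    #   MPS available or FORCE_MPS=1        -> Apple MPS build
    #   FORCE_NPU=1                         -> Ascend NPU build
    #   otherwise                           -> CPU-only build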

    if os.getenv('MMCV_WITH_OPS', '1') == '0':
        return extensions

    if EXT_TYPE == 'parrots':
        ext_name = 'mmcv._ext'
        from parrots.utils.build_extension import Extension

        # new parrots op impl does not use MMCV_USE_PARROTS
        # define_macros = [('MMCV_USE_PARROTS', None)]
        define_macros = []
        include_dirs = []
        op_files = glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
            glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
            glob.glob('./mmcv/ops/csrc/parrots/*.cpp')
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        op_files.remove('./mmcv/ops/csrc/pytorch/cuda/iou3d_cuda.cu')
        op_files.remove('./mmcv/ops/csrc/pytorch/cpu/bbox_overlaps_cpu.cpp')
        op_files.remove('./mmcv/ops/csrc/pytorch/cuda/bias_act_cuda.cu')
        cuda_args = os.getenv('MMCV_CUDA_ARGS')
        extra_compile_args = {
            'nvcc': [cuda_args, '-std=c++14'] if cuda_args else ['-std=c++14'],
            'cxx': ['-std=c++14'],
        }
        if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
            define_macros += [('MMCV_WITH_CUDA', None)]
            extra_compile_args['nvcc'] += [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        ext_ops = Extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            cuda=True,
            pytorch=True)
        extensions.append(ext_ops)
    elif EXT_TYPE == 'pytorch':
        ext_name = 'mmcv._ext'
        from torch.utils.cpp_extension import CppExtension, CUDAExtension

        # prevent ninja from using too many resources
        try:
            import psutil
            num_cpu = len(psutil.Process().cpu_affinity())
            cpu_use = max(4, num_cpu - 1)
        except (ModuleNotFoundError, AttributeError):
            cpu_use = 4

        os.environ.setdefault('MAX_JOBS', str(cpu_use))
        define_macros = []

        # Before PyTorch1.8.0, when compiling CUDA code, `cxx` is a
        # required key passed to PyTorch. Even if there is no flag passed
        # to cxx, users also need to pass an empty list to PyTorch.
        # Since PyTorch1.8.0, it has a default value so users do not need
        # to pass an empty list anymore.
        # More details at https://github.com/pytorch/pytorch/pull/45956
        extra_compile_args = {'cxx': []}

        if platform.system() != 'Windows':
            extra_compile_args['cxx'] = ['-std=c++14']
        else:
            # TODO: On Windows, PyTorch 2.0 compiles extensions with C++17,
            # but that currently causes a compile error here. As a temporary
            # workaround, force the use of C++14.
            if parse_version(torch.__version__) >= parse_version('2.0.0'):
                extra_compile_args['cxx'] = ['/std:c++14']

        include_dirs = []
        library_dirs = []
        libraries = []

        extra_objects = []
        extra_link_args = []
        is_rocm_pytorch = False
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            is_rocm_pytorch = (torch.version.hip is not None) and \
                (ROCM_HOME is not None)
        except ImportError:
            pass

        if os.getenv('MMCV_WITH_DIOPI', '0') == '1':
            import mmengine  # NOQA: F401
            from mmengine.utils.version_utils import digit_version
            assert digit_version(mmengine.__version__) >= digit_version(
                '0.7.4'), f'mmengine >= 0.7.4 is required \
                but {mmengine.__version__} is installed'

            print(f'Compiling {ext_name} with CPU and DIPU')
            define_macros += [('MMCV_WITH_DIOPI', None)]
            define_macros += [('DIOPI_ATTR_WEAK', None)]
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            dipu_root = os.getenv('DIPU_ROOT')
            diopi_path = os.getenv('DIOPI_PATH')
            dipu_path = os.getenv('DIPU_PATH')
            vendor_include_dirs = os.getenv('VENDOR_INCLUDE_DIRS')
            nccl_include_dirs = os.getenv('NCCL_INCLUDE_DIRS')
            include_dirs.append(dipu_root)
            include_dirs.append(diopi_path + '/include')
            include_dirs.append(dipu_path + '/dist/include')
            include_dirs.append(vendor_include_dirs)
            if nccl_include_dirs:
                include_dirs.append(nccl_include_dirs)
            library_dirs += [dipu_root]
            libraries += ['torch_dipu']
        elif is_rocm_pytorch or torch.cuda.is_available() or os.getenv(
                'FORCE_CUDA', '0') == '1':
            if is_rocm_pytorch:
                define_macros += [('MMCV_WITH_HIP', None)]
            define_macros += [('MMCV_WITH_CUDA', None)]
            cuda_args = os.getenv('MMCV_CUDA_ARGS')
            extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
            if is_rocm_pytorch and platform.system() != 'Windows':
                extra_compile_args['nvcc'] += \
                    ['--gpu-max-threads-per-block=1024']
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp')
            extension = CUDAExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/pytorch'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
        elif (hasattr(torch, 'is_mlu_available') and
                torch.is_mlu_available()) or \
                os.getenv('FORCE_MLU', '0') == '1':
            from torch_mlu.utils.cpp_extension import MLUExtension

            def get_mluops_version(file_path):
                with open(file_path) as f:
                    for line in f:
                        if re.search('MLUOP_MAJOR', line):
                            major = line.strip().split(' ')[2]
                        if re.search('MLUOP_MINOR', line):
                            minor = line.strip().split(' ')[2]
                        if re.search('MLUOP_PATCHLEVEL', line):
                            patchlevel = line.strip().split(' ')[2]
                mluops_version = f'v{major}.{minor}.{patchlevel}'
                return mluops_version
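
            # Note: the parser above assumes mlu_op.h defines the version via
            # lines like '#define MLUOP_MAJOR 0', '#define MLUOP_MINOR 8' and
            # '#define MLUOP_PATCHLEVEL 1', which would yield 'v0.8.1'.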

            mmcv_mluops_version = get_mluops_version(
                './mmcv/ops/csrc/pytorch/mlu/mlu_common_helper.h')
            mlu_ops_path = os.getenv('MMCV_MLU_OPS_PATH')
            if mlu_ops_path:
                exists_mluops_version = get_mluops_version(
                    mlu_ops_path + '/bangc-ops/mlu_op.h')
                if exists_mluops_version != mmcv_mluops_version:
                    print('the version of mlu-ops provided is %s,'
                          ' while %s is needed.' %
                          (exists_mluops_version, mmcv_mluops_version))
                    exit()
                try:
                    if os.path.exists('mlu-ops'):
                        if os.path.islink('mlu-ops'):
                            os.remove('mlu-ops')
                            os.symlink(mlu_ops_path, 'mlu-ops')
                        elif os.path.abspath('mlu-ops') != mlu_ops_path:
                            os.symlink(mlu_ops_path, 'mlu-ops')
                    else:
                        os.symlink(mlu_ops_path, 'mlu-ops')
                except Exception:
                    raise FileExistsError(
                        'mlu-ops already exists, please move it out,'
                        'or rename or remove it.')
            else:
                if not os.path.exists('mlu-ops'):
                    import requests
                    mluops_url = 'https://github.com/Cambricon/mlu-ops/' + \
                        'archive/refs/tags/' + mmcv_mluops_version + '.zip'
                    req = requests.get(mluops_url)
                    with open('./mlu-ops.zip', 'wb') as f:
                        try:
                            f.write(req.content)
                        except Exception:
                            raise ImportError('failed to download mlu-ops')

                    from zipfile import BadZipFile, ZipFile
                    with ZipFile('./mlu-ops.zip', 'r') as archive:
                        try:
                            archive.extractall()
                            dir_name = archive.namelist()[0].split('/')[0]
                            os.rename(dir_name, 'mlu-ops')
                        except BadZipFile:
                            print('invalid mlu-ops.zip file')
                else:
                    exists_mluops_version = get_mluops_version(
                        './mlu-ops/bangc-ops/mlu_op.h')
                    if exists_mluops_version != mmcv_mluops_version:
                        print('the version of provided mlu-ops is %s,'
                              ' while %s is needed.' %
                              (exists_mluops_version, mmcv_mluops_version))
                        exit()

            define_macros += [('MMCV_WITH_MLU', None)]
            mlu_args = os.getenv('MMCV_MLU_ARGS', '-DNDEBUG ')
            mluops_includes = []
            mluops_includes.append('-I' +
                                   os.path.abspath('./mlu-ops/bangc-ops'))
            mluops_includes.append(
                '-I' + os.path.abspath('./mlu-ops/bangc-ops/kernels'))
            extra_compile_args['cncc'] = [mlu_args] + \
                mluops_includes if mlu_args else mluops_includes
            extra_compile_args['cxx'] += ['-fno-gnu-unique']
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/mlu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/common/mlu/*.mlu') + \
                glob.glob(
                    './mlu-ops/bangc-ops/core/**/*.cpp', recursive=True) + \
                glob.glob(
                    './mlu-ops/bangc-ops/kernels/**/*.cpp', recursive=True) + \
                glob.glob(
                    './mlu-ops/bangc-ops/kernels/**/*.mlu', recursive=True)
            extra_link_args = [
                '-Wl,--whole-archive',
                './mlu-ops/bangc-ops/kernels/kernel_wrapper/lib/libextops.a',
                '-Wl,--no-whole-archive'
            ]
            extension = MLUExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/mlu'))
            include_dirs.append(os.path.abspath('./mlu-ops/bangc-ops'))
        elif (hasattr(torch.backends, 'mps')
              and torch.backends.mps.is_available()) or os.getenv(
                  'FORCE_MPS', '0') == '1':
            # objc compiler support
            from distutils.unixccompiler import UnixCCompiler
            if '.mm' not in UnixCCompiler.src_extensions:
                UnixCCompiler.src_extensions.append('.mm')
                UnixCCompiler.language_map['.mm'] = 'objc'

            define_macros += [('MMCV_WITH_MPS', None)]
            extra_compile_args = {}
            extra_compile_args['cxx'] = ['-Wall', '-std=c++17']
            extra_compile_args['cxx'] += [
                '-framework', 'Metal', '-framework', 'Foundation'
            ]
            extra_compile_args['cxx'] += ['-ObjC++']
            # src
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/common/mps/*.mm') + \
                glob.glob('./mmcv/ops/csrc/pytorch/mps/*.mm')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/mps'))
        elif os.getenv('FORCE_NPU', '0') == '1':
            print(f'Compiling {ext_name} only with CPU and NPU')
            try:
                from torch_npu.utils.cpp_extension import NpuExtension
                define_macros += [('MMCV_WITH_NPU', None)]
                extension = NpuExtension
            except Exception:
                raise ImportError('cannot find torch_npu')
            # src
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/common/npu/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/npu/*.cpp')
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/npu'))
        else:
            print(f'Compiling {ext_name} only with CPU')
            op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
                glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')
            extension = CppExtension
            include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))

        # Since PR https://github.com/open-mmlab/mmcv/pull/1463 uses C++14
        # features, the '-std=c++14' flag must be added for nvcc here. In the
        # Windows environment, however, some standard libraries depend on
        # C++17 or higher, and the compiler chooses an appropriate standard on
        # its own, so the flag is not added there.
        if 'nvcc' in extra_compile_args and platform.system() != 'Windows':
            extra_compile_args['nvcc'] += ['-std=c++14']

        ext_ops = extension(
            name=ext_name,
            sources=op_files,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_objects=extra_objects,
            extra_compile_args=extra_compile_args,
            library_dirs=library_dirs,
            libraries=libraries,
            extra_link_args=extra_link_args)
        extensions.append(ext_ops)
    return extensions


setup(
    name='mmcv' if os.getenv('MMCV_WITH_OPS', '1') == '1' else 'mmcv-lite',
    version=get_version(),
    description='OpenMMLab Computer Vision Foundation',
    keywords='computer vision',
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Utilities',
    ],
    url='https://github.com/open-mmlab/mmcv',
    author='MMCV Contributors',
    author_email='openmmlab@gmail.com',
    install_requires=install_requires,
    extras_require={
        'all': parse_requirements('requirements.txt'),
        'tests': parse_requirements('requirements/test.txt'),
        'build': parse_requirements('requirements/build.txt'),
        'optional': parse_requirements('requirements/optional.txt'),
    },
    python_requires='>=3.7',
    ext_modules=get_extensions(),
    cmdclass=cmd_class,
    zip_safe=False)