import torch
from setuptools import setup, find_packages
import subprocess

from pip._internal import main as pipmain
import sys
import warnings

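# Apex's setup recognizes several custom command-line flags (--pyprof, --cpp_ext,
# --cuda_ext, --bnp, --xentropy) that select which optional extensions get built.
# A typical invocation with the C++/CUDA extensions enabled (assumed from the
# project README of this era; adjust to your environment) is:
#   pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./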
if not torch.cuda.is_available():
    print("\nWarning: Torch did not find available GPUs on this system.\n",
          "If your intention is to cross-compile, this is not an error.\n")

print("torch.__version__  = ", torch.__version__)
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])

if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
      raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" +
19
20
                         "The latest stable release can be obtained from https://pytorch.org/")

cmdclass = {}
ext_modules = []

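# "--pyprof" installs the PyProf dependencies from requirements.txt before the
# regular build proceeds.  Note that pip._internal is not a supported pip API and
# may change between pip releases; installing requirements.txt manually is a safer
# fallback if this call breaks.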
if "--pyprof" in sys.argv:
    with open('requirements.txt') as f:
        required_packages = f.read().splitlines()
        pipmain(["install"] + required_packages)
    try:
        sys.argv.remove("--pyprof")
    except ValueError:
        pass
else:
    warnings.warn("Option --pyprof not specified. Not installing PyProf dependencies!")

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError("--cpp_ext and --cuda_ext require Pytorch 1.0 or later, "
                           "found torch.__version__ = {}".format(torch.__version__))
    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

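# apex_C is a small CPU-only extension; flatten_unflatten.cpp exposes the tensor
# flatten/unflatten helpers used by apex's distributed utilities.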
if "--cpp_ext" in sys.argv:
    from torch.utils.cpp_extension import CppExtension
    sys.argv.remove("--cpp_ext")
    ext_modules.append(
        CppExtension('apex_C',
                     ['csrc/flatten_unflatten.cpp',]))

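# `nvcc -V` prints a line such as "Cuda compilation tools, release 10.1, V10.1.168";
# the parsing below keys off the "release" token and compares that major.minor pair
# against torch.version.cuda, the CUDA toolkit version the PyTorch binary was built with.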
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
        raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " +
                           "not match the version used to compile Pytorch binaries.  " +
                           "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
                           "In some cases, a minor-version mismatch will not cause later errors:  " +
                           "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
                           "You can try commenting out this check (at your own risk).")

if "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--cuda_ext")

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--cuda_ext was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)

        # Set up macros for forward/backward compatibility hack around
        # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
        version_ge_1_1 = []
        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
            version_ge_1_1 = ['-DVERSION_GE_1_1']

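        # amp_C bundles the multi-tensor kernels (scale, axpby, l2norm, LAMB, Adam,
        # Novograd, SGD) used by apex's mixed-precision and fused-optimizer paths.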
        ext_modules.append(
            CUDAExtension(name='amp_C',
                          sources=['csrc/amp_C_frontend.cpp',
                                   'csrc/multi_tensor_sgd_kernel.cu',
                                   'csrc/multi_tensor_scale_kernel.cu',
                                   'csrc/multi_tensor_axpby_kernel.cu',
                                   'csrc/multi_tensor_l2norm_kernel.cu',
                                   'csrc/multi_tensor_lamb_stage_1.cu',
                                   'csrc/multi_tensor_lamb_stage_2.cu',
                                   'csrc/multi_tensor_adam.cu',
                                   'csrc/multi_tensor_novograd.cu'],
                          extra_compile_args={'cxx': ['-O3'],
                                              'nvcc':['-lineinfo',
                                                      '-O3',
                                                      # '--resource-usage',
                                                      '--use_fast_math']}))
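        # The remaining --cuda_ext modules: fused Adam optimizer kernels, synchronized
        # batch norm (Welford-based), and fused layer norm, each built as a separate
        # importable extension.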
        ext_modules.append(
            CUDAExtension(name='fused_adam_cuda',
                          sources=['csrc/fused_adam_cuda.cpp',
                                   'csrc/fused_adam_cuda_kernel.cu'],
                          extra_compile_args={'cxx': ['-O3',],
                                              'nvcc':['-O3',
                                                      '--use_fast_math']}))
        ext_modules.append(
            CUDAExtension(name='syncbn',
                          sources=['csrc/syncbn.cpp',
                                   'csrc/welford.cu']))
        ext_modules.append(
            CUDAExtension(name='fused_layer_norm_cuda',
                          sources=['csrc/layer_norm_cuda.cpp',
                                   'csrc/layer_norm_cuda_kernel.cu'],
                          extra_compile_args={'cxx': ['-O3'] + version_ge_1_1,
                                              'nvcc':['-maxrregcount=50',
                                                      '-O3',
                                                      '--use_fast_math'] + version_ge_1_1}))

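# "--bnp" builds the apex.contrib group batch-norm extension.  The -gencode flag
# below targets compute capability 7.0 (Volta) only; other GPU architectures would
# need additional -gencode entries.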
if "--bnp" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--bnp")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--bnp was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        # Set up macros for forward/backward compatibility hack around
        # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
        version_ge_1_1 = []
        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
            version_ge_1_1 = ['-DVERSION_GE_1_1']
        ext_modules.append(
            CUDAExtension(name='bnp',
                          sources=['apex/contrib/csrc/groupbn/batch_norm.cu',
                                   'apex/contrib/csrc/groupbn/ipc.cu',
                                   'apex/contrib/csrc/groupbn/interface.cpp',
                                   'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],
                          extra_compile_args={'cxx': [] + version_ge_1_1,
                                              'nvcc':['-DCUDA_HAS_FP16=1',
                                                      '-D__CUDA_NO_HALF_OPERATORS__',
                                                      '-D__CUDA_NO_HALF_CONVERSIONS__',
                                                      '-D__CUDA_NO_HALF2_OPERATORS__',
                                                      '-gencode',
                                                      'arch=compute_70,code=sm_70'] + version_ge_1_1}))

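# "--xentropy" builds the apex.contrib fused softmax cross-entropy kernel
# (xentropy_cuda), reusing headers from the top-level csrc/ directory via include_dirs.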
if "--xentropy" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--xentropy")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--xentropy was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        # Set up macros for forward/backward compatibility hack around
        # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
        version_ge_1_1 = []
        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
            version_ge_1_1 = ['-DVERSION_GE_1_1']
        ext_modules.append(
            CUDAExtension(name='xentropy_cuda',
                          sources=['apex/contrib/csrc/xentropy/interface.cpp',
                                   'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],
                          include_dirs=['csrc'],
                          extra_compile_args={'cxx': ['-O3'] + version_ge_1_1,
                                              'nvcc':['-O3'] + version_ge_1_1}))

setup(
    name='apex',
    version='0.1',
    packages=find_packages(exclude=('build',
                                    'csrc',
                                    'include',
                                    'tests',
                                    'dist',
                                    'docs',
                                    'examples',
                                    'apex.egg-info',)),
    description='PyTorch Extensions written by NVIDIA',
    ext_modules=ext_modules,
    cmdclass=cmdclass,
)