import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess

import sys
import warnings
import os

# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
torch_dir = torch.__path__[0]

# https://github.com/pytorch/pytorch/pull/71881
# For the extensions that use rocblas_gemm_flags_fp16_alt_impl we need to check whether at::BackwardPassGuard exists,
# which keeps the extensions backward compatible with old PyTorch versions.
# The check and ROCM_BACKWARD_PASS_GUARD in the nvcc/hipcc args can be retired once the PR is merged into PyTorch upstream.

found_Backward_Pass_Guard = False
found_ROCmBackward_Pass_Guard = False
context_file = os.path.join(torch_dir, "include", "ATen", "Context.h")
if os.path.exists(context_file):
    with open(context_file, 'r') as f:
        lines = f.readlines()
    for line in lines:
        if "BackwardPassGuard" in line:
            # BackwardPassGuard has been renamed to ROCmBackwardPassGuard
            # https://github.com/pytorch/pytorch/pull/71881/commits/4b82f5a67a35406ffb5691c69e6b4c9086316a43
            if "ROCmBackwardPassGuard" in line:
                found_ROCmBackward_Pass_Guard = True
            else:
                found_Backward_Pass_Guard = True
            break
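# The flags found above choose the matching -DBACKWARD_PASS_GUARD_CLASS=...
# define passed to the hipcc builds of the MLP and multihead-attn extensions below.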

def get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor
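# For reference, `nvcc -V` ends with a line such as
# "Cuda compilation tools, release 11.3, V11.3.109", from which the parsing
# above yields bare_metal_major="11" and bare_metal_minor="3".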


def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
        raise RuntimeError(
            "CUDA extensions are being compiled with a version of CUDA that does "
            "not match the version used to compile PyTorch binaries.  "
            "PyTorch binaries were compiled with CUDA {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )


def raise_if_cuda_home_none(global_option: str) -> None:
    if CUDA_HOME is not None:
        return
    raise RuntimeError(
        f"{global_option} was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
    # nvcc supports parallel compilation via --threads starting with CUDA 11.2;
    # compare (major, minor) as a tuple so that, e.g., CUDA 12.0 also qualifies.
    _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
    if (int(bare_metal_major), int(bare_metal_minor)) >= (11, 2):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args
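# For example, append_nvcc_threads(["-O3"]) yields ["-O3", "--threads", "4"]
# when the toolkit in CUDA_HOME is 11.2 or newer.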


def check_cudnn_version_and_warn(global_option: str, required_cudnn_version: int) -> bool:
    cudnn_available = torch.backends.cudnn.is_available()
    cudnn_version = torch.backends.cudnn.version() if cudnn_available else None
    if not (cudnn_available and (cudnn_version >= required_cudnn_version)):
        warnings.warn(
            f"Skip `{global_option}` as it requires cuDNN {required_cudnn_version} or later, "
            f"but {'cuDNN is not available' if not cudnn_available else f'the detected cuDNN version is {cudnn_version}'}"
        )
        return False
    return True
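# torch.backends.cudnn.version() encodes cuDNN x.y.z as a single integer,
# e.g. 8400 for cuDNN 8.4.0, so the plain integer comparison above suffices.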


print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])

def check_if_rocm_pytorch():
    is_rocm_pytorch = False
    if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
        from torch.utils.cpp_extension import ROCM_HOME
        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    return is_rocm_pytorch

IS_ROCM_PYTORCH = check_if_rocm_pytorch()

if not torch.cuda.is_available() and not IS_ROCM_PYTORCH:
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n",
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
    )
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
        if int(bare_metal_major) == 11:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
            if int(bare_metal_minor) > 0:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
elif not torch.cuda.is_available() and IS_ROCM_PYTORCH:
    print('\nWarning: Torch did not find available GPUs on this system.\n',
          'If your intention is to cross-compile, this is not an error.\n'
          'By default, Apex will cross-compile for the same gfx targets\n'
          'used by default in ROCm PyTorch\n')
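# For example, `export TORCH_CUDA_ARCH_LIST="8.0"` before running setup.py
# restricts the cross-compile to compute capability 8.0 (A100-class GPUs).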

if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
    raise RuntimeError(
        "Apex requires Pytorch 0.4 or newer.\nThe latest stable release can be obtained from https://pytorch.org/"
    )

cmdclass = {}
ext_modules = []

extras = {}

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError("--cpp_ext and --cuda_ext require PyTorch 1.0 or later, "
                           "found torch.__version__ = {}".format(torch.__version__))
    cmdclass['build_ext'] = BuildExtension
if "--cpp_ext" in sys.argv:
    sys.argv.remove("--cpp_ext")
    ext_modules.append(CppExtension("apex_C", ["csrc/flatten_unflatten.cpp"]))

# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ["-DVERSION_GE_1_1"]
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ["-DVERSION_GE_1_3"]
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ["-DVERSION_GE_1_5"]
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
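# With, e.g., torch 1.10 all three checks above pass and this expands to
# ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5'].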

if "--distributed_adam" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--distributed_adam" in sys.argv:
        sys.argv.remove("--distributed_adam")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--distributed_adam was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        nvcc_args_adam = ['-O3', '--use_fast_math'] + version_dependent_macros
        hipcc_args_adam = ['-O3'] + version_dependent_macros
        ext_modules.append(
            CUDAExtension(name='distributed_adam_cuda',
                          sources=['apex/contrib/csrc/optimizers/multi_tensor_distopt_adam.cpp',
                                   'apex/contrib/csrc/optimizers/multi_tensor_distopt_adam_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc'),
                                        os.path.join(this_dir, 'apex/contrib/csrc/optimizers')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':nvcc_args_adam if not IS_ROCM_PYTORCH else hipcc_args_adam}))

if "--distributed_lamb" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--distributed_lamb" in sys.argv:
        sys.argv.remove("--distributed_lamb")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--distributed_lamb was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        print ("INFO: Building the distributed_lamb extension.")
        nvcc_args_distributed_lamb = ['-O3', '--use_fast_math'] + version_dependent_macros
        hipcc_args_distributed_lamb = ['-O3'] + version_dependent_macros
        ext_modules.append(
            CUDAExtension(name='distributed_lamb_cuda',
                          sources=['apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb.cpp',
                                   'apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc': nvcc_args_distributed_lamb if not IS_ROCM_PYTORCH else hipcc_args_distributed_lamb}))

if "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--cuda_ext was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        if not IS_ROCM_PYTORCH:
            check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)

        print ("INFO: Building the multi-tensor apply extension.")
        nvcc_args_multi_tensor = ['-lineinfo', '-O3', '--use_fast_math'] + version_dependent_macros
        hipcc_args_multi_tensor = ['-O3'] + version_dependent_macros
        ext_modules.append(
            CUDAExtension(name='amp_C',
                          sources=['csrc/amp_C_frontend.cpp',
                                   'csrc/multi_tensor_sgd_kernel.cu',
                                   'csrc/multi_tensor_scale_kernel.cu',
                                   'csrc/multi_tensor_axpby_kernel.cu',
                                   'csrc/multi_tensor_l2norm_kernel.cu',
                                   'csrc/multi_tensor_l2norm_kernel_mp.cu',
                                   'csrc/multi_tensor_l2norm_scale_kernel.cu',
                                   'csrc/multi_tensor_lamb_stage_1.cu',
                                   'csrc/multi_tensor_lamb_stage_2.cu',
                                   'csrc/multi_tensor_adam.cu',
                                   'csrc/multi_tensor_adagrad.cu',
                                   'csrc/multi_tensor_novograd.cu',
                                   'csrc/multi_tensor_lamb.cu',
                                   'csrc/multi_tensor_lamb_mp.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc': nvcc_args_multi_tensor if not IS_ROCM_PYTORCH else hipcc_args_multi_tensor}))

        print ("INFO: Building syncbn extension.")
        ext_modules.append(
            CUDAExtension(name='syncbn',
                          sources=['csrc/syncbn.cpp',
                                   'csrc/welford.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros}))

        nvcc_args_layer_norm = ['-maxrregcount=50', '-O3', '--use_fast_math'] + version_dependent_macros
        hipcc_args_layer_norm = ['-O3'] + version_dependent_macros
        print ("INFO: Building fused layernorm extension.")
        ext_modules.append(
            CUDAExtension(name='fused_layer_norm_cuda',
                          sources=['csrc/layer_norm_cuda.cpp',
                                   'csrc/layer_norm_cuda_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc': nvcc_args_layer_norm if not IS_ROCM_PYTORCH else hipcc_args_layer_norm}))

        hipcc_args_mlp = ['-O3'] + version_dependent_macros
        if found_Backward_Pass_Guard:
            hipcc_args_mlp = hipcc_args_mlp + ['-DBACKWARD_PASS_GUARD'] + ['-DBACKWARD_PASS_GUARD_CLASS=BackwardPassGuard']
        if found_ROCmBackward_Pass_Guard:
            hipcc_args_mlp = hipcc_args_mlp + ['-DBACKWARD_PASS_GUARD'] + ['-DBACKWARD_PASS_GUARD_CLASS=ROCmBackwardPassGuard']

        print ("INFO: Building the MLP Extension.")
        ext_modules.append(
            CUDAExtension(name='mlp_cuda',
                          sources=['csrc/mlp.cpp',
                                   'csrc/mlp_cuda.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros
                                              if not IS_ROCM_PYTORCH else hipcc_args_mlp}))

        ext_modules.append(
            CUDAExtension(name='fused_dense_cuda',
                          sources=['csrc/fused_dense.cpp',
                                   'csrc/fused_dense_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros}))
        nvcc_args_transformer = ['-O3',
                                 '-U__CUDA_NO_HALF_OPERATORS__',
                                 '-U__CUDA_NO_HALF_CONVERSIONS__',
                                 '--expt-relaxed-constexpr',
                                 '--expt-extended-lambda'] + version_dependent_macros
        hipcc_args_transformer = ['-O3',
                                 '-U__CUDA_NO_HALF_OPERATORS__',
                                 '-U__CUDA_NO_HALF_CONVERSIONS__'] + version_dependent_macros
        ext_modules.append(
            CUDAExtension(name='scaled_upper_triang_masked_softmax_cuda',
                          sources=['csrc/megatron/scaled_upper_triang_masked_softmax.cpp',
                                   'csrc/megatron/scaled_upper_triang_masked_softmax_cuda.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':nvcc_args_transformer if not IS_ROCM_PYTORCH else hipcc_args_transformer}))
        ext_modules.append(
            CUDAExtension(name='scaled_masked_softmax_cuda',
                          sources=['csrc/megatron/scaled_masked_softmax.cpp',
                                   'csrc/megatron/scaled_masked_softmax_cuda.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc'),
                                        os.path.join(this_dir, 'csrc/megatron')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':nvcc_args_transformer if not IS_ROCM_PYTORCH else hipcc_args_transformer}))


if "--bnp" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--bnp" in sys.argv:
        sys.argv.remove("--bnp")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--bnp was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='bnp',
                          sources=['apex/contrib/csrc/groupbn/batch_norm.cu',
                                   'apex/contrib/csrc/groupbn/ipc.cu',
                                   'apex/contrib/csrc/groupbn/interface.cpp',
                                   'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc'),
                                        os.path.join(this_dir, 'apex/contrib/csrc/groupbn')],
                          extra_compile_args={'cxx': [] + version_dependent_macros,
                                              'nvcc':['-DCUDA_HAS_FP16=1',
                                                      '-D__CUDA_NO_HALF_OPERATORS__',
                                                      '-D__CUDA_NO_HALF_CONVERSIONS__',
                                                      '-D__CUDA_NO_HALF2_OPERATORS__'] + version_dependent_macros}))

if "--xentropy" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--xentropy" in sys.argv:
        sys.argv.remove("--xentropy")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--xentropy was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        print ("INFO: Building the xentropy extension.")
        ext_modules.append(
            CUDAExtension(name='xentropy_cuda',
                          sources=['apex/contrib/csrc/xentropy/interface.cpp',
                                   'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc'),
                                        os.path.join(this_dir, 'apex/contrib/csrc/xentropy')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros}))

if "--focal_loss" in sys.argv or "--cuda_ext" in sys.argv:
    if "--focal_loss" in sys.argv:
        sys.argv.remove("--focal_loss")
    ext_modules.append(
        CUDAExtension(
            name='focal_loss_cuda',
            sources=[
                'apex/contrib/csrc/focal_loss/focal_loss_cuda.cpp',
                'apex/contrib/csrc/focal_loss/focal_loss_cuda_kernel.cu',
            ],
            include_dirs=[os.path.join(this_dir, 'csrc')],
            extra_compile_args={
                'cxx': ['-O3'] + version_dependent_macros,
                'nvcc':(['-O3', '--use_fast_math', '--ftz=false'] if not IS_ROCM_PYTORCH else ['-O3']) + version_dependent_macros,
            },
        )
    )

if "--index_mul_2d" in sys.argv or "--cuda_ext" in sys.argv:
    if "--index_mul_2d" in sys.argv:
        sys.argv.remove("--index_mul_2d")
    ext_modules.append(
        CUDAExtension(
            name='fused_index_mul_2d',
            sources=[
                'apex/contrib/csrc/index_mul_2d/index_mul_2d_cuda.cpp',
                'apex/contrib/csrc/index_mul_2d/index_mul_2d_cuda_kernel.cu',
            ],
            include_dirs=[os.path.join(this_dir, 'csrc')],
            extra_compile_args={
                'cxx': ['-O3'] + version_dependent_macros,
                'nvcc':(['-O3', '--use_fast_math', '--ftz=false'] if not IS_ROCM_PYTORCH else ['-O3']) + version_dependent_macros,
            },
        )
    )

if "--deprecated_fused_adam" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--deprecated_fused_adam" in sys.argv:
        sys.argv.remove("--deprecated_fused_adam")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--deprecated_fused_adam was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        print ("INFO: Building deprecated fused adam extension.")
        nvcc_args_fused_adam = ['-O3', '--use_fast_math'] + version_dependent_macros
        hipcc_args_fused_adam = ['-O3'] + version_dependent_macros
        ext_modules.append(
            CUDAExtension(name='fused_adam_cuda',
                          sources=['apex/contrib/csrc/optimizers/fused_adam_cuda.cpp',
                                   'apex/contrib/csrc/optimizers/fused_adam_cuda_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc'),
                                        os.path.join(this_dir, 'apex/contrib/csrc/optimizers')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc' : nvcc_args_fused_adam if not IS_ROCM_PYTORCH else hipcc_args_fused_adam}))

if "--deprecated_fused_lamb" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--deprecated_fused_lamb" in sys.argv:
        sys.argv.remove("--deprecated_fused_lamb")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--deprecated_fused_lamb was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        print ("INFO: Building deprecated fused lamb extension.")
        nvcc_args_fused_lamb = ['-O3', '--use_fast_math'] + version_dependent_macros
        hipcc_args_fused_lamb = ['-O3'] + version_dependent_macros
        ext_modules.append(
            CUDAExtension(name='fused_lamb_cuda',
                          sources=['apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp',
                                   'apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu',
                                   'csrc/multi_tensor_l2norm_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args = nvcc_args_fused_lamb if not IS_ROCM_PYTORCH else hipcc_args_fused_lamb))

# Check if ATen/CUDAGeneratorImpl.h is found; otherwise use ATen/cuda/CUDAGeneratorImpl.h.
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]

if "--fast_layer_norm" in sys.argv:
    sys.argv.remove("--fast_layer_norm")
    raise_if_cuda_home_none("--fast_layer_norm")
    # Check if CUDA 11 is installed for compute capability 8.0
    cc_flag = []
    _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_80,code=sm_80")

if "--fmha" in sys.argv:
    sys.argv.remove("--fmha")
    if CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--fmha was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        # FMHA needs compute capability 8.0, which requires CUDA 11 or later.
        cc_flag = []
        _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
        if int(bare_metal_major) < 11:
            raise RuntimeError("--fmha only supported on SM80")

        ext_modules.append(
            CUDAExtension(name='fmhalib',
                          sources=[
                                   'apex/contrib/csrc/fmha/fmha_api.cpp',
                                   'apex/contrib/csrc/fmha/src/fmha_noloop_reduce.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_fprop_fp16_128_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_fprop_fp16_256_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_fprop_fp16_384_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_fprop_fp16_512_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_128_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_256_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_384_64_kernel.sm80.cu',
                                   'apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_512_64_kernel.sm80.cu',
                                   ],
                          extra_compile_args={'cxx': ['-O3',
                                                      ] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_80,code=sm_80',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag + cc_flag},
                          include_dirs=[os.path.join(this_dir, "apex/contrib/csrc"), os.path.join(this_dir, "apex/contrib/csrc/fmha/src")]))


if "--fast_multihead_attn" in sys.argv or "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    if "--fast_multihead_attn" in sys.argv:
        sys.argv.remove("--fast_multihead_attn")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
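    # use_ninja=False makes BuildExtension drive the compiler directly
    # instead of generating a ninja build for this extension.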

    if torch.utils.cpp_extension.CUDA_HOME is None and not IS_ROCM_PYTORCH:
        raise RuntimeError("--fast_multihead_attn was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        # Check if CUDA 11 is installed for compute capability 8.0
        cc_flag = []
        if not IS_ROCM_PYTORCH:
            _, bare_metal_major, _ = get_cuda_bare_metal_version(torch.utils.cpp_extension.CUDA_HOME)
            if int(bare_metal_major) >= 11:
                cc_flag.append('-gencode')
                cc_flag.append('arch=compute_80,code=sm_80')
                cc_flag.append('-gencode')
                cc_flag.append('arch=compute_86,code=sm_86')

        subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/multihead_attn/cutlass"])
        nvcc_args_mha = ['-O3',
                         '-gencode',
                         'arch=compute_70,code=sm_70',
                         '-Iapex/contrib/csrc/multihead_attn/cutlass',
                         '-U__CUDA_NO_HALF_OPERATORS__',
                         '-U__CUDA_NO_HALF_CONVERSIONS__',
                         '--expt-relaxed-constexpr',
                         '--expt-extended-lambda',
                         '--use_fast_math'] + version_dependent_macros + generator_flag + cc_flag
        hipcc_args_mha = ['-O3',
                          '-Iapex/contrib/csrc/multihead_attn/cutlass',
                          '-I/opt/rocm/include/hiprand',
                          '-I/opt/rocm/include/rocrand',
                          '-U__HIP_NO_HALF_OPERATORS__',
                          '-U__HIP_NO_HALF_CONVERSIONS__'] + version_dependent_macros + generator_flag
        if found_Backward_Pass_Guard:
            hipcc_args_mha = hipcc_args_mha + ['-DBACKWARD_PASS_GUARD'] + ['-DBACKWARD_PASS_GUARD_CLASS=BackwardPassGuard']
        if found_ROCmBackward_Pass_Guard:
            hipcc_args_mha = hipcc_args_mha + ['-DBACKWARD_PASS_GUARD'] + ['-DBACKWARD_PASS_GUARD_CLASS=ROCmBackwardPassGuard']

        ext_modules.append(
            CUDAExtension(
                name='fast_multihead_attn',
                sources=[
                    'apex/contrib/csrc/multihead_attn/multihead_attn_frontend.cpp',
                    'apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout_cuda.cu',
                    "apex/contrib/csrc/multihead_attn/masked_softmax_dropout_cuda.cu",
                    "apex/contrib/csrc/multihead_attn/encdec_multihead_attn_cuda.cu",
                    "apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add_cuda.cu",
                    "apex/contrib/csrc/multihead_attn/self_multihead_attn_cuda.cu",
                    "apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask_cuda.cu",
                    "apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_cuda.cu",
                    "apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add_cuda.cu",
                ],
                include_dirs=[os.path.join(this_dir, 'csrc'),
                              os.path.join(this_dir, 'apex/contrib/csrc/multihead_attn')],
                extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                    'nvcc': nvcc_args_mha if not IS_ROCM_PYTORCH else hipcc_args_mha},
            )
        )

if "--transducer" in sys.argv or "--cuda_ext" in sys.argv:
    if "--transducer" in sys.argv:
        sys.argv.remove("--transducer")
    
    if not IS_ROCM_PYTORCH:
        raise_if_cuda_home_none("--transducer")

    ext_modules.append(
        CUDAExtension(
            name="transducer_joint_cuda",
            sources=[
                "apex/contrib/csrc/transducer/transducer_joint.cpp",
                "apex/contrib/csrc/transducer/transducer_joint_kernel.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros + generator_flag,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros + generator_flag) if not IS_ROCM_PYTORCH
                        else ["-O3"] + version_dependent_macros + generator_flag,
            },
            include_dirs=[os.path.join(this_dir, "csrc"), os.path.join(this_dir, "apex/contrib/csrc/multihead_attn")],
        )
    )
    ext_modules.append(
        CUDAExtension(
            name="transducer_loss_cuda",
            sources=[
                "apex/contrib/csrc/transducer/transducer_loss.cpp",
                "apex/contrib/csrc/transducer/transducer_loss_kernel.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros) if not IS_ROCM_PYTORCH
                        else ["-O3"] + version_dependent_macros,
            },
        )
    )

# note (mkozuki): Now `--fast_bottleneck` option (i.e. apex/contrib/bottleneck) depends on `--peer_memory` and `--nccl_p2p`.
if "--fast_bottleneck" in sys.argv:
    sys.argv.remove("--fast_bottleneck")
    raise_if_cuda_home_none("--fast_bottleneck")
    if check_cudnn_version_and_warn("--fast_bottleneck", 8400):
        subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/cudnn-frontend/"])
        ext_modules.append(
            CUDAExtension(
                name="fast_bottleneck",
                sources=["apex/contrib/csrc/bottleneck/bottleneck.cpp"],
                include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/cudnn-frontend/include")],
                extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
            )
        )

if "--peer_memory" in sys.argv or "--cuda_ext" in sys.argv:
    if "--peer_memory" in sys.argv:
        sys.argv.remove("--peer_memory")

    if not IS_ROCM_PYTORCH:
        raise_if_cuda_home_none("--peer_memory")

    ext_modules.append(
        CUDAExtension(
            name="peer_memory_cuda",
            sources=[
                "apex/contrib/csrc/peer_memory/peer_memory_cuda.cu",
                "apex/contrib/csrc/peer_memory/peer_memory.cpp",
            ],
            extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
        )
    )

if "--nccl_p2p" in sys.argv or "--cuda_ext" in sys.argv:
    if "--nccl_p2p" in sys.argv:
        sys.argv.remove("--nccl_p2p")

    if not IS_ROCM_PYTORCH:
        raise_if_cuda_home_none("--nccl_p2p")

    ext_modules.append(
        CUDAExtension(
            name="nccl_p2p_cuda",
            sources=[
                "apex/contrib/csrc/nccl_p2p/nccl_p2p_cuda.cu",
                "apex/contrib/csrc/nccl_p2p/nccl_p2p.cpp",
            ],
            extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
        )
    )


if "--fused_conv_bias_relu" in sys.argv:
    sys.argv.remove("--fused_conv_bias_relu")
    raise_if_cuda_home_none("--fused_conv_bias_relu")
    if check_cudnn_version_and_warn("--fused_conv_bias_relu", 8400):
        subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/cudnn-frontend/"])
        ext_modules.append(
            CUDAExtension(
                name="fused_conv_bias_relu",
                sources=["apex/contrib/csrc/conv_bias_relu/conv_bias_relu.cpp"],
                include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/cudnn-frontend/include")],
                extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
            )
        )

if "--cuda_ext" in sys.argv:
    sys.argv.remove("--cuda_ext")

setup(
    name="apex",
    version="0.1",
    packages=find_packages(
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "tests", "examples", "apex.egg-info",)
    ),
    description="PyTorch Extensions written by NVIDIA",
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    #cmdclass={'build_ext': BuildExtension} if ext_modules else {},
    extras_require=extras,
)