import torch
from setuptools import setup, find_packages
import subprocess

import sys
import warnings
import os

from torch.utils.hipify import hipify_python

# ninja build does not work unless include_dirs are absolute paths
this_dir = os.path.dirname(os.path.abspath(__file__))

if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print('\nWarning: Torch did not find available GPUs on this system.\n',
          'If your intention is to cross-compile, this is not an error.\n'
          'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
          'Volta (compute capability 7.0), and Turing (compute capability 7.5).\n'
          'If you wish to cross-compile for a single specific architecture,\n'
          'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"

print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])

if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
    raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" +
                       "The latest stable release can be obtained from https://pytorch.org/")

cmdclass = {}
ext_modules = []
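# Build flags such as --cpp_ext and --cuda_ext below are consumed straight from
# sys.argv; they are typically forwarded through pip, e.g. (exact syntax may
# vary by pip version):
#   pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./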

extras = {}
if "--pyprof" in sys.argv:
    string = "\n\nPyprof has been moved to its own dedicated repository and will " + \
             "soon be removed from Apex.  Please visit\n" + \
             "https://github.com/NVIDIA/PyProf\n" + \
             "for the latest version."
    warnings.warn(string, DeprecationWarning)
    with open('requirements.txt') as f:
        required_packages = f.read().splitlines()
        extras['pyprof'] = required_packages
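        # exposed via extras_require below, i.e. installable as `pip install .[pyprof]`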
    try:
        sys.argv.remove("--pyprof")
    except ValueError:
        pass
else:
    warnings.warn("Option --pyprof not specified. Not installing PyProf dependencies!")

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, "
58
                           "found torch.__version__ = {}".format(torch.__version__))
    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

if "--cpp_ext" in sys.argv:
    from torch.utils.cpp_extension import CppExtension
    sys.argv.remove("--cpp_ext")
    ext_modules.append(
        CppExtension('apex_C',
                     ['csrc/flatten_unflatten.cpp',]))

def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
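    # nvcc prints e.g. "... release 10.2, V10.2.89"; output[release_idx] is then
    # "10.2," (note the trailing comma), so release[1][0] keeps just the first
    # digit of the minor version.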
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
        raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " +
                           "not match the version used to compile Pytorch binaries.  " +
                           "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
                           "In some cases, a minor-version mismatch will not cause later errors:  " +
                           "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
                           "You can try commenting out this check (at your own risk).")

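# ROCm PyTorch is detected via torch.version.hip plus a ROCM_HOME install;
# the version guard below reflects that ROCM_HOME appeared around torch 1.5.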
def check_if_rocm_pytorch():
    is_rocm_pytorch = False
    if (TORCH_MAJOR, TORCH_MINOR) >= (1, 5):
        from torch.utils.cpp_extension import ROCM_HOME
        is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False

    return is_rocm_pytorch

# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5

if "--distributed_lamb" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--distributed_lamb")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--distributed_lamb was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='distributed_lamb_cuda',
                          sources=['apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb.cpp',
                                   'apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '--use_fast_math'] + version_dependent_macros}))

if "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--cuda_ext")

    is_rocm_pytorch = check_if_rocm_pytorch()

    if torch.utils.cpp_extension.CUDA_HOME is None and (not is_rocm_pytorch):
        raise RuntimeError("--cuda_ext was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        if not is_rocm_pytorch:
            check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)

        if is_rocm_pytorch:
            import shutil
            with hipify_python.GeneratedFileCleaner(keep_intermediates=True) as clean_ctx:
                hipify_python.hipify(project_directory=this_dir, output_directory=this_dir, includes="csrc/*",
                                     show_detailed=True, is_pytorch_extension=True, clean_ctx=clean_ctx)
            if (TORCH_MAJOR, TORCH_MINOR) < (1, 8):
                shutil.copy("csrc/compat.h", "csrc/hip/compat.h")
                shutil.copy("csrc/type_shim.h", "csrc/hip/type_shim.h")

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='amp_C',
                              sources=['csrc/amp_C_frontend.cpp',
                                       'csrc/multi_tensor_sgd_kernel.cu',
                                       'csrc/multi_tensor_scale_kernel.cu',
                                       'csrc/multi_tensor_axpby_kernel.cu',
                                       'csrc/multi_tensor_l2norm_kernel.cu',
                                       'csrc/multi_tensor_lamb_stage_1.cu',
                                       'csrc/multi_tensor_lamb_stage_2.cu',
                                       'csrc/multi_tensor_adam.cu',
                                       'csrc/multi_tensor_adagrad.cu',
                                       'csrc/multi_tensor_novograd.cu',
                                       'csrc/multi_tensor_lamb.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-lineinfo',
                                                          '-O3',
                                                          # '--resource-usage',
                                                          '--use_fast_math'] + version_dependent_macros}))
        else:
            print ("INFO: Building Multitensor apply extension")
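            # Two source lists because the hipify step above emits the generated
            # .hip files into csrc/ on torch >= 1.8 but into csrc/hip/ on older
            # versions (as encoded in the paths below).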
            multi_tensor_sources_v1_8 = ['csrc/amp_C_frontend.cpp',
                                         'csrc/multi_tensor_sgd_kernel.hip',
                                         'csrc/multi_tensor_scale_kernel.hip',
                                         'csrc/multi_tensor_axpby_kernel.hip',
                                         'csrc/multi_tensor_l2norm_kernel.hip',
                                         'csrc/multi_tensor_lamb_stage_1.hip',
                                         'csrc/multi_tensor_lamb_stage_2.hip',
                                         'csrc/multi_tensor_adam.hip',
                                         'csrc/multi_tensor_adagrad.hip',
                                         'csrc/multi_tensor_novograd.hip',
                                         'csrc/multi_tensor_lamb.hip']

            multi_tensor_sources_other = ['csrc/amp_C_frontend.cpp',
                                          'csrc/hip/multi_tensor_sgd_kernel.hip',
                                          'csrc/hip/multi_tensor_scale_kernel.hip',
                                          'csrc/hip/multi_tensor_axpby_kernel.hip',
                                          'csrc/hip/multi_tensor_l2norm_kernel.hip',
                                          'csrc/hip/multi_tensor_lamb_stage_1.hip',
                                          'csrc/hip/multi_tensor_lamb_stage_2.hip',
                                          'csrc/hip/multi_tensor_adam.hip',
                                          'csrc/hip/multi_tensor_adagrad.hip',
                                          'csrc/hip/multi_tensor_novograd.hip',
                                          'csrc/hip/multi_tensor_lamb.hip']
            ext_modules.append(
                CUDAExtension(name='amp_C',
                              sources=multi_tensor_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else multi_tensor_sources_other,
                              extra_compile_args=['-O3'] + version_dependent_macros))

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='syncbn',
                              sources=['csrc/syncbn.cpp',
                                       'csrc/welford.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-O3'] + version_dependent_macros}))
        else:
            print("INFO: Building syncbn extension.")
            syncbn_sources_v1_8 = ['csrc/syncbn.cpp', 'csrc/welford.hip']
            syncbn_sources_other = ['csrc/syncbn.cpp', 'csrc/hip/welford.hip']
            ext_modules.append(
                CUDAExtension(name='syncbn',
                              sources=syncbn_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else syncbn_sources_other,
                              extra_compile_args=['-O3'] + version_dependent_macros))

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='fused_layer_norm_cuda',
                              sources=['csrc/layer_norm_cuda.cpp',
                                       'csrc/layer_norm_cuda_kernel.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-maxrregcount=50',
                                                          '-O3',
                                                          '--use_fast_math'] + version_dependent_macros}))
        else:
            print("INFO: Building FusedLayerNorm extension.")
            layer_norm_sources_v1_8 = ['csrc/layer_norm_cuda.cpp', 'csrc/layer_norm_hip_kernel.hip']
            layer_norm_sources_other = ['csrc/layer_norm_cuda.cpp', 'csrc/hip/layer_norm_hip_kernel.hip']
            ext_modules.append(
                CUDAExtension(name='fused_layer_norm_cuda',
                              sources=layer_norm_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else layer_norm_sources_other,
                              extra_compile_args=['-O3'] + version_dependent_macros))

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='mlp_cuda',
                              sources=['csrc/mlp.cpp',
                                       'csrc/mlp_cuda.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-O3'] + version_dependent_macros}))
        else:
            print("INFO: Building MLP extension.")
            mlp_sources_v1_8 = ['csrc/mlp.cpp', 'csrc/mlp_hip.hip']
            mlp_sources_other = ['csrc/mlp.cpp', 'csrc/hip/mlp_hip.hip']
            ext_modules.append(
                CUDAExtension(name='mlp_cuda',
                              sources=mlp_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else mlp_sources_other,
                              extra_compile_args=['-O3'] + version_dependent_macros))

if "--bnp" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--bnp")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--bnp was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='bnp',
                          sources=['apex/contrib/csrc/groupbn/batch_norm.cu',
                                   'apex/contrib/csrc/groupbn/ipc.cu',
                                   'apex/contrib/csrc/groupbn/interface.cpp',
                                   'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': [] + version_dependent_macros,
                                              'nvcc':['-DCUDA_HAS_FP16=1',
                                                      '-D__CUDA_NO_HALF_OPERATORS__',
                                                      '-D__CUDA_NO_HALF_CONVERSIONS__',
                                                      '-D__CUDA_NO_HALF2_OPERATORS__'] + version_dependent_macros}))

if "--xentropy" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--xentropy")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    is_rocm_pytorch = check_if_rocm_pytorch()

    if torch.utils.cpp_extension.CUDA_HOME is None and (not is_rocm_pytorch):
        raise RuntimeError("--xentropy was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='xentropy_cuda',
                              sources=['apex/contrib/csrc/xentropy/interface.cpp',
                                       'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],
                              include_dirs=[os.path.join(this_dir, 'csrc')],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-O3'] + version_dependent_macros}))
        else:
            xentropy_sources_v1_8 = ['apex/contrib/csrc/xentropy/interface.cpp', 'apex/contrib/csrc/xentropy/xentropy_kernel.hip']
            xentropy_sources_other = ['apex/contrib/csrc/xentropy/interface.cpp', 'apex/contrib/csrc/xentropy/hip/xentropy_kernel.hip']

            ext_modules.append(
                CUDAExtension(name='xentropy_cuda',
                              sources=xentropy_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else xentropy_sources_other,
                              include_dirs=[os.path.join(this_dir, 'csrc') if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else os.path.join(this_dir, 'csrc/hip')],
                              extra_compile_args=['-O3'] + version_dependent_macros))

if "--deprecated_fused_adam" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--deprecated_fused_adam")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    is_rocm_pytorch = check_if_rocm_pytorch()

    if torch.utils.cpp_extension.CUDA_HOME is None and (not is_rocm_pytorch):
        raise RuntimeError("--deprecated_fused_adam was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='fused_adam_cuda',
                              sources=['apex/contrib/csrc/optimizers/fused_adam_cuda.cpp',
                                       'apex/contrib/csrc/optimizers/fused_adam_cuda_kernel.cu'],
                              include_dirs=[os.path.join(this_dir, 'csrc')],
                              extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                                  'nvcc':['-O3',
                                                          '--use_fast_math'] + version_dependent_macros}))
        else:
            print ("INFO: Building deprecated fused adam.")
            fused_adam_sources_v1_8 = ['apex/contrib/csrc/optimizers/fused_adam_cuda.cpp',
                                       'apex/contrib/csrc/optimizers/fused_adam_hip_kernel.hip']

            fused_adam_sources_other = ['apex/contrib/csrc/optimizers/fused_adam_cuda.cpp',
                                        'apex/contrib/csrc/optimizers/hip/fused_adam_hip_kernel.hip']

            ext_modules.append(
                CUDAExtension(name='fused_adam_cuda',
                              sources=fused_adam_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else fused_adam_sources_other,
                              include_dirs=[os.path.join(this_dir, 'csrc') if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else os.path.join(this_dir, 'csrc/hip')],
                              extra_compile_args=['-O3'] + version_dependent_macros))

if "--deprecated_fused_lamb" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--deprecated_fused_lamb")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    is_rocm_pytorch = check_if_rocm_pytorch()

    if torch.utils.cpp_extension.CUDA_HOME is None and (not is_rocm_pytorch):
        raise RuntimeError("--deprecated_fused_lamb was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='fused_lamb_cuda',
                              sources=['apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp',
                                       'apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu',
                                       'csrc/multi_tensor_l2norm_kernel.cu'],
                              include_dirs=[os.path.join(this_dir, 'csrc')],
                              extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                                  'nvcc':['-O3',
                                                          '--use_fast_math'] + version_dependent_macros}))
        else:
            print ("INFO: Building deprecated fused lamb.")
            fused_lamb_sources_v1_8 = ['apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp',
                                       'apex/contrib/csrc/optimizers/fused_lamb_hip_kernel.hip']

            fused_lamb_sources_other = ['apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp',
                                        'apex/contrib/csrc/optimizers/hip/fused_lamb_hip_kernel.hip']

            ext_modules.append(
                CUDAExtension(name='fused_lamb_cuda',
                              sources=fused_lamb_sources_v1_8 if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else fused_lamb_sources_other,
                              include_dirs=[os.path.join(this_dir, 'csrc') if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8) else os.path.join(this_dir, 'csrc/hip')],
                              extra_compile_args=['-O3'] + version_dependent_macros))

# Check if ATen/CUDAGenerator.h is found, otherwise use the new ATen/CUDAGeneratorImpl.h, due to the breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, 'include', 'ATen', 'CUDAGenerator.h')):
    generator_flag = ['-DOLD_GENERATOR']

if "--fast_multihead_attn" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--fast_multihead_attn")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--fast_multihead_attn was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
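        # the multihead attn kernels include CUTLASS headers
        # (-I./apex/contrib/csrc/multihead_attn/cutlass/ below), so fetch the
        # submodule before compiling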
        subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/multihead_attn/cutlass"])
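        # Note: every extension below is compiled only with
        # '-gencode', 'arch=compute_70,code=sm_70', i.e. these kernels target Volta.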
        ext_modules.append(
            CUDAExtension(name='fast_additive_mask_softmax_dropout',
                          sources=['apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout.cpp',
                                   'apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_mask_softmax_dropout',
                          sources=['apex/contrib/csrc/multihead_attn/masked_softmax_dropout.cpp',
                                   'apex/contrib/csrc/multihead_attn/masked_softmax_dropout_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_bias_additive_mask',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_bias',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_bias.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_norm_add',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_encdec_multihead_attn',
                          sources=['apex/contrib/csrc/multihead_attn/encdec_multihead_attn.cpp',
                                   'apex/contrib/csrc/multihead_attn/encdec_multihead_attn_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_encdec_multihead_attn_norm_add',
                          sources=['apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add.cpp',
                                   'apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))

setup(
    name='apex',
    version='0.1',
    packages=find_packages(exclude=('build',
                                    'csrc',
                                    'include',
                                    'tests',
                                    'dist',
                                    'docs',
                                    'examples',
                                    'apex.egg-info',)),
    description='PyTorch Extensions written by NVIDIA',
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    extras_require=extras,
)