import torch
from setuptools import setup, find_packages
import subprocess

import sys
import warnings
import os

from torch.utils.hipify import hipify_python

# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))

if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print('\nWarning: Torch did not find available GPUs on this system.\n',
          'If your intention is to cross-compile, this is not an error.\n'
          'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
          'Volta (compute capability 7.0), and Turing (compute capability 7.5).\n'
          'If you wish to cross-compile for a single specific architecture,\n'
          'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"

print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])

if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
      raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" +
33
34
                         "The latest stable release can be obtained from https://pytorch.org/")

cmdclass = {}
ext_modules = []

extras = {}
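# Build options are consumed here directly from sys.argv before setup() runs;
# they are typically passed through pip, e.g.
#   pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./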
if "--pyprof" in sys.argv:
    string = "\n\nPyprof has been moved to its own dedicated repository and will " + \
             "soon be removed from Apex.  Please visit\n" + \
             "https://github.com/NVIDIA/PyProf\n" + \
             "for the latest version."
    warnings.warn(string, DeprecationWarning)
    with open('requirements.txt') as f:
        required_packages = f.read().splitlines()
        extras['pyprof'] = required_packages
    try:
        sys.argv.remove("--pyprof")
    except ValueError:
        pass
else:
    warnings.warn("Option --pyprof not specified. Not installing PyProf dependencies!")

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, "
58
                           "found torch.__version__ = {}".format(torch.__version__))
    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

if "--cpp_ext" in sys.argv:
    from torch.utils.cpp_extension import CppExtension
    sys.argv.remove("--cpp_ext")
    ext_modules.append(
        CppExtension('apex_C',
                     ['csrc/flatten_unflatten.cpp',]))

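# Compare the CUDA version reported by nvcc against the CUDA version the
# installed torch binary was compiled with; a mismatch typically breaks
# extension builds or causes failures at load time.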
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
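    # The token after "release" looks like "10.1,", so release == ["10", "1,"];
    # keep only the leading digit of the minor version to drop the trailing comma.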
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
        raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " +
                           "not match the version used to compile Pytorch binaries.  " +
                           "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
                           "In some cases, a minor-version mismatch will not cause later errors:  " +
                           "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
                           "You can try commenting out this check (at your own risk).")

# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5

if "--distributed_lamb" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--distributed_lamb")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--distributed_lamb was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='distributed_lamb_cuda',
                          sources=['apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb.cpp',
                                   'apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '--use_fast_math'] + version_dependent_macros}))

if "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--cuda_ext")

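    # ROCM_HOME is only exposed by the cpp_extension utilities in torch >= 1.5,
    # so gate the import on the torch version before probing for a ROCm build.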
    is_rocm_pytorch = False
    if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
        from torch.utils.cpp_extension import ROCM_HOME
        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    if torch.utils.cpp_extension.CUDA_HOME is None and (not is_rocm_pytorch):
        raise RuntimeError("--cuda_ext was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        if not is_rocm_pytorch:
            check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)

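        # Under ROCm, hipify translates the CUDA sources in csrc/ into HIP
        # sources in csrc/hip/ so the same extensions can be built against the
        # HIP toolchain.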
        if is_rocm_pytorch:
            import shutil
            with hipify_python.GeneratedFileCleaner(keep_intermediates=True) as clean_ctx:
                hipify_python.hipify(project_directory=this_dir, output_directory=this_dir, includes="csrc/*",
                                        show_detailed=True, is_pytorch_extension=True, clean_ctx=clean_ctx)
            shutil.copy("csrc/compat.h", "csrc/hip/compat.h")
            shutil.copy("csrc/type_shim.h", "csrc/hip/type_shim.h")

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='amp_C',
                              sources=['csrc/amp_C_frontend.cpp',
                                       'csrc/multi_tensor_sgd_kernel.cu',
                                       'csrc/multi_tensor_scale_kernel.cu',
                                       'csrc/multi_tensor_axpby_kernel.cu',
                                       'csrc/multi_tensor_l2norm_kernel.cu',
                                       'csrc/multi_tensor_lamb_stage_1.cu',
                                       'csrc/multi_tensor_lamb_stage_2.cu',
                                       'csrc/multi_tensor_adam.cu',
                                       'csrc/multi_tensor_adagrad.cu',
                                       'csrc/multi_tensor_novograd.cu',
                                       'csrc/multi_tensor_lamb.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-lineinfo',
                                                          '-O3',
                                                          # '--resource-usage',
                                                          '--use_fast_math'] + version_dependent_macros}))
        else:
            print ("INFO: Building Multitensor apply extension")
            ext_modules.append(
                CUDAExtension(name='amp_C',
                              sources=['csrc/amp_C_frontend.cpp',
                                       'csrc/hip/multi_tensor_sgd_kernel.hip',
                                       'csrc/hip/multi_tensor_scale_kernel.hip',
                                       'csrc/hip/multi_tensor_axpby_kernel.hip',
                                       'csrc/hip/multi_tensor_l2norm_kernel.hip',
                                       'csrc/hip/multi_tensor_lamb_stage_1.hip',
                                       'csrc/hip/multi_tensor_lamb_stage_2.hip',
                                       'csrc/hip/multi_tensor_adam.hip',
                                       'csrc/hip/multi_tensor_adagrad.hip',
                                       'csrc/hip/multi_tensor_novograd.hip',
                                       'csrc/hip/multi_tensor_lamb.hip'],
                              extra_compile_args=['-O3'] + version_dependent_macros))

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='syncbn',
                              sources=['csrc/syncbn.cpp',
                                       'csrc/welford.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-O3'] + version_dependent_macros}))
        else:
            print ("INFO: Building syncbn extension.")
            ext_modules.append(
                CUDAExtension(name='syncbn',
                              sources=['csrc/syncbn.cpp',
                                       'csrc/hip/welford.hip'],
                              extra_compile_args=['-O3'] + version_dependent_macros))

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='fused_layer_norm_cuda',
                              sources=['csrc/layer_norm_cuda.cpp',
                                       'csrc/layer_norm_cuda_kernel.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-maxrregcount=50',
                                                          '-O3',
                                                          '--use_fast_math'] + version_dependent_macros}))
        else:
            print ("INFO: Building FusedLayerNorm extension.")
            ext_modules.append(
                CUDAExtension(name='fused_layer_norm_cuda',
                              sources=['csrc/layer_norm_cuda.cpp',
                                       'csrc/hip/layer_norm_hip_kernel.hip'],
                              extra_compile_args={'cxx' : ['-O3'] + version_dependent_macros,
                                                  'nvcc' : []}))

        if not is_rocm_pytorch:
            ext_modules.append(
                CUDAExtension(name='mlp_cuda',
                              sources=['csrc/mlp.cpp',
                                       'csrc/mlp_cuda.cu'],
                              extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                                  'nvcc':['-O3'] + version_dependent_macros}))
        else:
            print ("INFO: Building MLP extension")
            ext_modules.append(
                CUDAExtension(name='mlp_cuda',
                              sources=['csrc/mlp.cpp',
                                       'csrc/hip/mlp_hip.hip'],
                              extra_compile_args={'cxx' : ['-O3'] + version_dependent_macros,
                                                  'nvcc' : []}))

if "--bnp" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--bnp")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--bnp was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
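        # The -D__CUDA_NO_HALF* flags below disable CUDA's built-in __half
        # operator overloads, which would otherwise conflict with ATen's
        # half-precision types in these kernels.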
        ext_modules.append(
            CUDAExtension(name='bnp',
                          sources=['apex/contrib/csrc/groupbn/batch_norm.cu',
                                   'apex/contrib/csrc/groupbn/ipc.cu',
                                   'apex/contrib/csrc/groupbn/interface.cpp',
                                   'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': [] + version_dependent_macros,
                                              'nvcc':['-DCUDA_HAS_FP16=1',
                                                      '-D__CUDA_NO_HALF_OPERATORS__',
                                                      '-D__CUDA_NO_HALF_CONVERSIONS__',
                                                      '-D__CUDA_NO_HALF2_OPERATORS__'] + version_dependent_macros}))

if "--xentropy" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--xentropy")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--xentropy was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='xentropy_cuda',
                          sources=['apex/contrib/csrc/xentropy/interface.cpp',
                                   'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros}))

if "--deprecated_fused_adam" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--deprecated_fused_adam")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--deprecated_fused_adam was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='fused_adam_cuda',
                          sources=['apex/contrib/csrc/optimizers/fused_adam_cuda.cpp',
                                   'apex/contrib/csrc/optimizers/fused_adam_cuda_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '--use_fast_math'] + version_dependent_macros}))

if "--deprecated_fused_lamb" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--deprecated_fused_lamb")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--deprecated_fused_lamb was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='fused_lamb_cuda',
                          sources=['apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp',
                                   'apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu',
                                   'csrc/multi_tensor_l2norm_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '--use_fast_math'] + version_dependent_macros}))

# Check if ATen/CUDAGenerator.h is found; otherwise use the new ATen/CUDAGeneratorImpl.h, due to the breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, 'include', 'ATen', 'CUDAGenerator.h')):
    generator_flag = ['-DOLD_GENERATOR']

if "--fast_multihead_attn" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--fast_multihead_attn")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--fast_multihead_attn was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/multihead_attn/cutlass"])
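        # The multihead-attn kernels depend on CUTLASS (vendored as a git
        # submodule) and are built only for Volta (compute capability 7.0),
        # per the -gencode flags below.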
        ext_modules.append(
            CUDAExtension(name='fast_additive_mask_softmax_dropout',
                          sources=['apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout.cpp',
                                   'apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_mask_softmax_dropout',
                          sources=['apex/contrib/csrc/multihead_attn/masked_softmax_dropout.cpp',
                                   'apex/contrib/csrc/multihead_attn/masked_softmax_dropout_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_bias_additive_mask',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_bias',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_bias.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_norm_add',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_encdec_multihead_attn',
                          sources=['apex/contrib/csrc/multihead_attn/encdec_multihead_attn.cpp',
                                   'apex/contrib/csrc/multihead_attn/encdec_multihead_attn_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))
        ext_modules.append(
            CUDAExtension(name='fast_encdec_multihead_attn_norm_add',
                          sources=['apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add.cpp',
                                   'apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros + generator_flag}))

setup(
    name='apex',
    version='0.1',
    packages=find_packages(exclude=('build',
                                    'csrc',
                                    'include',
                                    'tests',
                                    'dist',
                                    'docs',
                                    'examples',
                                    'apex.egg-info',)),
    description='PyTorch Extensions written by NVIDIA',
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    extras_require=extras,
)