import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess

import sys
import warnings
import os

# ninja build does not work unless include_dirs are absolute paths
this_dir = os.path.dirname(os.path.abspath(__file__))


def get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor
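
# Illustrative example of the parse above: given an `nvcc -V` banner containing
# "Cuda compilation tools, release 11.3, V11.3.109", get_cuda_bare_metal_version()
# returns bare_metal_major == "11" and bare_metal_minor == "3" (both strings).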


def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
        raise RuntimeError(
            "Cuda extensions are being compiled with a version of Cuda that does "
            "not match the version used to compile Pytorch binaries.  "
            "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )
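
# For example (illustrative): a PyTorch wheel built against CUDA 11.1 combined
# with a local nvcc 11.3 raises above, even though such minor-version
# mismatches are often benign (see the linked discussion).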


def raise_if_cuda_home_none(global_option: str) -> None:
    if CUDA_HOME is not None:
        return
    raise RuntimeError(
        f"{global_option} was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
    return nvcc_extra_args + ["--threads", "4"]
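
# Note on the helper above (hedged): nvcc's "--threads 4" parallelizes
# device-code compilation across gencode targets; this assumes a toolkit new
# enough to support the flag (CUDA 11.2+, to the best of my knowledge).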


if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n"
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
    )
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
        if int(bare_metal_major) >= 11:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"

print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])

if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
    raise RuntimeError(
        "Apex requires PyTorch 0.4 or newer.\nThe latest stable release can be obtained from https://pytorch.org/"
    )

cmdclass = {}
ext_modules = []

extras = {}
if "--pyprof" in sys.argv:
    string = (
        "\n\nPyprof has been moved to its own dedicated repository and will "
        "soon be removed from Apex.  Please visit\n"
        "https://github.com/NVIDIA/PyProf\n"
        "for the latest version."
    )
    warnings.warn(string, DeprecationWarning)
    with open("requirements.txt") as f:
        required_packages = f.read().splitlines()
        extras["pyprof"] = required_packages
    sys.argv.remove("--pyprof")
else:
    warnings.warn("Option --pyprof not specified. Not installing PyProf dependencies!")

if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
    if TORCH_MAJOR == 0:
        raise RuntimeError(
            "--cpp_ext requires PyTorch 1.0 or later, found torch.__version__ = {}".format(torch.__version__)
        )

if "--cpp_ext" in sys.argv:
    sys.argv.remove("--cpp_ext")
    ext_modules.append(CppExtension("apex_C", ["csrc/flatten_unflatten.cpp"]))
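    # Descriptive note (an assumption from the source layout): apex_C exposes
    # the tensor flatten/unflatten helpers in csrc/flatten_unflatten.cpp used
    # by Apex's Python-side utilities.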


# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ["-DVERSION_GE_1_1"]
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ["-DVERSION_GE_1_3"]
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ["-DVERSION_GE_1_5"]
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
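
# Illustrative (an assumption about the C++/CUDA sources): these macros let the
# kernels select version-specific Torch APIs, roughly
#   #ifdef VERSION_GE_1_5
#     // use the newer accessor here
#   #else
#     // fall back to the pre-1.5 spelling
#   #endif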

if "--distributed_adam" in sys.argv:
    sys.argv.remove("--distributed_adam")
    raise_if_cuda_home_none("--distributed_adam")
    ext_modules.append(
        CUDAExtension(
            name="distributed_adam_cuda",
            sources=[
                "apex/contrib/csrc/optimizers/multi_tensor_distopt_adam.cpp",
                "apex/contrib/csrc/optimizers/multi_tensor_distopt_adam_kernel.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3", "--use_fast_math"] + version_dependent_macros),
            },
        )
    )

if "--distributed_lamb" in sys.argv:
    sys.argv.remove("--distributed_lamb")
    raise_if_cuda_home_none("--distributed_lamb")
    ext_modules.append(
        CUDAExtension(
            name="distributed_lamb_cuda",
            sources=[
                "apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb.cpp",
                "apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb_kernel.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3", "--use_fast_math"] + version_dependent_macros),
            },
        )
    )

if "--cuda_ext" in sys.argv:
    sys.argv.remove("--cuda_ext")
    raise_if_cuda_home_none("--cuda_ext")
    check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)

    ext_modules.append(
        CUDAExtension(
            name="amp_C",
            sources=[
                "csrc/amp_C_frontend.cpp",
                "csrc/multi_tensor_sgd_kernel.cu",
                "csrc/multi_tensor_scale_kernel.cu",
                "csrc/multi_tensor_axpby_kernel.cu",
                "csrc/multi_tensor_l2norm_kernel.cu",
                "csrc/multi_tensor_l2norm_kernel_mp.cu",
                "csrc/multi_tensor_l2norm_scale_kernel.cu",
                "csrc/multi_tensor_lamb_stage_1.cu",
                "csrc/multi_tensor_lamb_stage_2.cu",
                "csrc/multi_tensor_adam.cu",
                "csrc/multi_tensor_adagrad.cu",
                "csrc/multi_tensor_novograd.cu",
                "csrc/multi_tensor_lamb.cu",
                "csrc/multi_tensor_lamb_mp.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(
                    [
                        "-lineinfo",
                        "-O3",
                        # '--resource-usage',
                        "--use_fast_math",
                    ]
                    + version_dependent_macros
                ),
            },
        )
    )
    ext_modules.append(
        CUDAExtension(
            name="syncbn",
            sources=["csrc/syncbn.cpp", "csrc/welford.cu"],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros),
            },
        )
    )

    ext_modules.append(
        CUDAExtension(
            name="fused_layer_norm_cuda",
            sources=["csrc/layer_norm_cuda.cpp", "csrc/layer_norm_cuda_kernel.cu"],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-maxrregcount=50", "-O3", "--use_fast_math"] + version_dependent_macros),
            },
        )
    )

    ext_modules.append(
        CUDAExtension(
            name="mlp_cuda",
            sources=["csrc/mlp.cpp", "csrc/mlp_cuda.cu"],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros),
            },
        )
    )
    ext_modules.append(
        CUDAExtension(
            name="fused_dense_cuda",
            sources=["csrc/fused_dense.cpp", "csrc/fused_dense_cuda.cu"],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros),
            },
        )
    )

    ext_modules.append(
        CUDAExtension(
            name="scaled_upper_triang_masked_softmax_cuda",
            sources=[
                "csrc/megatron/scaled_upper_triang_masked_softmax.cpp",
                "csrc/megatron/scaled_upper_triang_masked_softmax_cuda.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                    ]
                    + version_dependent_macros
                ),
            },
        )
    )

    ext_modules.append(
        CUDAExtension(
            name="scaled_masked_softmax_cuda",
            sources=["csrc/megatron/scaled_masked_softmax.cpp", "csrc/megatron/scaled_masked_softmax_cuda.cu"],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                    ]
                    + version_dependent_macros
                ),
            },
        )
    )

if "--bnp" in sys.argv:
    sys.argv.remove("--bnp")
    raise_if_cuda_home_none("--bnp")
    ext_modules.append(
        CUDAExtension(
            name="bnp",
            sources=[
                "apex/contrib/csrc/groupbn/batch_norm.cu",
                "apex/contrib/csrc/groupbn/ipc.cu",
                "apex/contrib/csrc/groupbn/interface.cpp",
                "apex/contrib/csrc/groupbn/batch_norm_add_relu.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": [] + version_dependent_macros,
                "nvcc": append_nvcc_threads(
                    [
                        "-DCUDA_HAS_FP16=1",
                        "-D__CUDA_NO_HALF_OPERATORS__",
                        "-D__CUDA_NO_HALF_CONVERSIONS__",
                        "-D__CUDA_NO_HALF2_OPERATORS__",
                    ]
                    + version_dependent_macros
                ),
            },
        )
    )

if "--xentropy" in sys.argv:
    sys.argv.remove("--xentropy")
    raise_if_cuda_home_none("--xentropy")
    ext_modules.append(
        CUDAExtension(
            name="xentropy_cuda",
            sources=["apex/contrib/csrc/xentropy/interface.cpp", "apex/contrib/csrc/xentropy/xentropy_kernel.cu"],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros),
            },
        )
    )

if "--deprecated_fused_adam" in sys.argv:
    sys.argv.remove("--deprecated_fused_adam")
    raise_if_cuda_home_none("--deprecated_fused_adam")
    ext_modules.append(
        CUDAExtension(
            name="fused_adam_cuda",
            sources=[
                "apex/contrib/csrc/optimizers/fused_adam_cuda.cpp",
                "apex/contrib/csrc/optimizers/fused_adam_cuda_kernel.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3", "--use_fast_math"] + version_dependent_macros),
            },
        )
    )

if "--deprecated_fused_lamb" in sys.argv:
    sys.argv.remove("--deprecated_fused_lamb")
    raise_if_cuda_home_none("--deprecated_fused_lamb")
    ext_modules.append(
        CUDAExtension(
            name="fused_lamb_cuda",
            sources=[
                "apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp",
                "apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu",
                "csrc/multi_tensor_l2norm_kernel.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3", "--use_fast_math"] + version_dependent_macros),
            },
        )
    )

# Check if ATen/CUDAGenerator.h is found; otherwise use the new ATen/CUDAGeneratorImpl.h,
# due to a breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGenerator.h")):
    generator_flag = ["-DOLD_GENERATOR"]

if "--fast_layer_norm" in sys.argv:
    sys.argv.remove("--fast_layer_norm")
    raise_if_cuda_home_none("--fast_layer_norm")
    # Check if CUDA 11 is installed, for compute capability 8.0
    cc_flag = []
    _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_80,code=sm_80")

    ext_modules.append(
        CUDAExtension(
            name="fast_layer_norm",
            sources=[
                "apex/contrib/csrc/layer_norm/ln_api.cpp",
                "apex/contrib/csrc/layer_norm/ln_fwd_cuda_kernel.cu",
                "apex/contrib/csrc/layer_norm/ln_bwd_semi_cuda_kernel.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros + generator_flag,
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-gencode",
                        "arch=compute_70,code=sm_70",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "-U__CUDA_NO_BFLOAT16_OPERATORS__",
                        "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                        "-U__CUDA_NO_BFLOAT162_OPERATORS__",
                        "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
                        "-I./apex/contrib/csrc/layer_norm/",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                        "--use_fast_math",
                    ]
                    + version_dependent_macros
                    + generator_flag
                    + cc_flag
                ),
            },
            include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/layer_norm")],
        )
    )

if "--fmha" in sys.argv:
    sys.argv.remove("--fmha")
    raise_if_cuda_home_none("--fmha")
    # Check that CUDA 11 is installed; the FMHA kernels target compute capability 8.0
    cc_flag = []
    _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
    if int(bare_metal_major) < 11:
        raise RuntimeError("--fmha is only supported on sm_80 (Ampere), which requires CUDA 11 or newer")
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_80,code=sm_80")

    ext_modules.append(
        CUDAExtension(
            name="fmhalib",
            sources=[
                "apex/contrib/csrc/fmha/fmha_api.cpp",
                "apex/contrib/csrc/fmha/src/fmha_noloop_reduce.cu",
                "apex/contrib/csrc/fmha/src/fmha_fprop_fp16_128_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_fprop_fp16_256_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_fprop_fp16_384_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_fprop_fp16_512_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_128_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_256_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_384_64_kernel.sm80.cu",
                "apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_512_64_kernel.sm80.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros + generator_flag,
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                        "--use_fast_math",
                    ]
                    + version_dependent_macros
                    + generator_flag
                    + cc_flag
                ),
            },
            include_dirs=[
                os.path.join(this_dir, "apex/contrib/csrc"),
                os.path.join(this_dir, "apex/contrib/csrc/fmha/src"),
            ],
        )
    )


if "--fast_multihead_attn" in sys.argv:
    sys.argv.remove("--fast_multihead_attn")
    raise_if_cuda_home_none("--fast_multihead_attn")

    # Check if CUDA 11 is installed, for compute capabilities 8.0 and 8.6
    cc_flag = []
    _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_80,code=sm_80")
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_86,code=sm_86")

    subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/multihead_attn/cutlass"])
    ext_modules.append(
        CUDAExtension(
            name="fast_multihead_attn",
            sources=[
                "apex/contrib/csrc/multihead_attn/multihead_attn_frontend.cpp",
                "apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout_cuda.cu",
                "apex/contrib/csrc/multihead_attn/masked_softmax_dropout_cuda.cu",
                "apex/contrib/csrc/multihead_attn/encdec_multihead_attn_cuda.cu",
                "apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add_cuda.cu",
                "apex/contrib/csrc/multihead_attn/self_multihead_attn_cuda.cu",
                "apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask_cuda.cu",
                "apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_cuda.cu",
                "apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add_cuda.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros + generator_flag,
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-gencode",
                        "arch=compute_70,code=sm_70",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                        "--use_fast_math",
                    ]
                    + version_dependent_macros
                    + generator_flag
                    + cc_flag
                ),
            },
            include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/multihead_attn/cutlass")],
        )
    )

if "--transducer" in sys.argv:
    sys.argv.remove("--transducer")
    raise_if_cuda_home_none("--transducer")
    ext_modules.append(
        CUDAExtension(
            name="transducer_joint_cuda",
            sources=[
                "apex/contrib/csrc/transducer/transducer_joint.cpp",
                "apex/contrib/csrc/transducer/transducer_joint_kernel.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros),
            },
            include_dirs=[os.path.join(this_dir, "csrc"), os.path.join(this_dir, "apex/contrib/csrc/multihead_attn")],
        )
    )
    ext_modules.append(
        CUDAExtension(
            name="transducer_loss_cuda",
            sources=[
                "apex/contrib/csrc/transducer/transducer_loss.cpp",
                "apex/contrib/csrc/transducer/transducer_loss_kernel.cu",
            ],
            include_dirs=[os.path.join(this_dir, "csrc")],
            extra_compile_args={
                "cxx": ["-O3"] + version_dependent_macros,
                "nvcc": append_nvcc_threads(["-O3"] + version_dependent_macros),
            },
        )
    )

if "--fast_bottleneck" in sys.argv:
    sys.argv.remove("--fast_bottleneck")
    raise_if_cuda_home_none("--fast_bottleneck")
    subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/cudnn-frontend/"])
    ext_modules.append(
        CUDAExtension(
            name="fast_bottleneck",
            sources=["apex/contrib/csrc/bottleneck/bottleneck.cpp"],
            include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/cudnn-frontend/include")],
            extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
        )
    )


setup(
    name="apex",
    version="0.1",
    packages=find_packages(
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "examples", "apex.egg-info",)
    ),
    description="PyTorch Extensions written by NVIDIA",
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension} if ext_modules else {},
    extras_require=extras,
)
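
# Typical invocations (illustrative; the Apex README documents the canonical
# commands):
#   pip install -v --no-cache-dir .                        # Python-only build
#   pip install -v --no-cache-dir \
#       --global-option="--cpp_ext" --global-option="--cuda_ext" .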