# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

import os
import pathlib
import subprocess

from torch.utils import cpp_extension

# Setting this param to a list has a problem of generating different
# compilation commands (with different order of architectures) and
# leading to recompilation of fused kernels. Set it to an empty string
# to avoid recompilation and assign arch flags explicitly in
# extra_cuda_cflags below.
os.environ["TORCH_CUDA_ARCH_LIST"] = ""


def load(args):

    # Check if CUDA 11 or newer is installed, to target compute capability 8.0.
    cc_flag = []
    _, bare_metal_major, _ = _get_cuda_bare_metal_version(
        cpp_extension.CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append('-gencode')
        cc_flag.append('arch=compute_80,code=sm_80')
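        # '-gencode arch=compute_80,code=sm_80' emits binary (SASS) code for
        # Ampere-class GPUs (e.g. A100), in addition to the Volta sm_70
        # target that the helper below always passes.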

    # Build path
    srcpath = pathlib.Path(__file__).parent.absolute()
    buildpath = srcpath / 'build'
    _create_build_dir(buildpath)

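    # cpp_extension.load JIT-compiles the given sources with nvcc and ninja
    # into `build_directory`, then imports and returns the resulting
    # extension module; builds are cached, so unchanged sources and flags
    # skip recompilation.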
    # Helper function to build and load a fused kernel extension.
    def _cpp_extension_load_helper(name, sources, extra_cuda_flags):
        return cpp_extension.load(
            name=name,
            sources=sources,
            build_directory=buildpath,
            extra_cflags=['-O3',],
            extra_cuda_cflags=['-O3',
                               '-gencode', 'arch=compute_70,code=sm_70',
                               '--use_fast_math'] + extra_cuda_flags + cc_flag,
            verbose=(args.rank == 0)
        )

    # ==============
    # Fused softmax.
    # ==============

    if args.masked_softmax_fusion:
        extra_cuda_flags = ['-U__CUDA_NO_HALF_OPERATORS__',
                            '-U__CUDA_NO_HALF_CONVERSIONS__',
                            '--expt-relaxed-constexpr',
                            '--expt-extended-lambda']
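        # PyTorch's default nvcc flags define the __CUDA_NO_HALF_* macros,
        # which hide CUDA's native half-precision operators and conversions;
        # the -U flags undo that, and the --expt flags enable the extended
        # __device__ lambdas and relaxed constexpr these kernels use.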
        
        # Upper triangular softmax.
        sources=[srcpath / 'scaled_upper_triang_masked_softmax.cpp',
                 srcpath / 'scaled_upper_triang_masked_softmax_cuda.cu']
        scaled_upper_triang_masked_softmax_cuda = _cpp_extension_load_helper(
            "scaled_upper_triang_masked_softmax_cuda",
            sources, extra_cuda_flags)

        # Masked softmax.
        sources=[srcpath / 'scaled_masked_softmax.cpp',
                 srcpath / 'scaled_masked_softmax_cuda.cu']
        scaled_masked_softmax_cuda = _cpp_extension_load_helper(
            "scaled_masked_softmax_cuda", sources, extra_cuda_flags)

        # Softmax.
        sources=[srcpath / 'scaled_softmax.cpp',
                 srcpath / 'scaled_softmax_cuda.cu']
        scaled_softmax_cuda = _cpp_extension_load_helper(
            "scaled_softmax_cuda", sources, extra_cuda_flags)

    # =================================
    # Mixed precision fused layer norm.
    # =================================

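    # -maxrregcount caps per-thread register usage (here at 50), which can
    # raise occupancy for the layer norm kernels at the cost of spills.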
    extra_cuda_flags = ['-maxrregcount=50']
    sources=[srcpath / 'layer_norm_cuda.cpp',
             srcpath / 'layer_norm_cuda_kernel.cu']
    fused_mix_prec_layer_norm_cuda = _cpp_extension_load_helper(
        "fused_mix_prec_layer_norm_cuda", sources, extra_cuda_flags)

    # =================================
    # Fused gradient accumulation into the weight gradient
    # computation of the linear layer.
    # =================================

    if args.gradient_accumulation_fusion:
        sources=[srcpath / 'fused_weight_gradient_dense.cpp',
                 srcpath / 'fused_weight_gradient_dense.cu']
        fused_dense_cuda = _cpp_extension_load_helper(
            "fused_dense_cuda", sources, [])


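# Note: `nvcc -V` reports the toolkit version on its last line, e.g.
# "Cuda compilation tools, release 11.4, V11.4.48". The whitespace token
# after "release" therefore looks like "11.4,", so splitting on "." gives
# the major version directly, and indexing [0] on the minor part drops the
# trailing comma.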
def _get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                         universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor


def _create_build_dir(buildpath):
    try:
        os.mkdir(buildpath)
    except OSError:
        if not os.path.isdir(buildpath):
            print(f"Creation of the build directory {buildpath} failed")