# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

import os
import pathlib
import subprocess

from torch.utils import cpp_extension

# Setting this param to a list of architectures can generate compilation
# commands with a different order of architectures on each run, which
# forces recompilation of the fused kernels. Set it to an empty string
# to avoid recompilation, and assign the arch flags explicitly in
# extra_cuda_cflags below.
os.environ["TORCH_CUDA_ARCH_LIST"] = ""


def load(args):
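    """JIT-compile the fused CUDA kernels with torch.utils.cpp_extension.

    Builds the fused softmax kernels when args.masked_softmax_fusion is
    set, the mixed precision fused layer norm kernel unconditionally, and
    the fused weight gradient kernel when args.gradient_accumulation_fusion
    is set. Compilation output is printed only on rank 0.
    """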

    # Check the installed CUDA version: 11.0+ is needed for compute
    # capability 8.0, and 11.7+ for compute capability 9.0.
    cc_flag = []
    _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(
        cpp_extension.CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append('-gencode')
        cc_flag.append('arch=compute_80,code=sm_80')  # Ampere
        if int(bare_metal_minor) >= 7:
            cc_flag.append('-gencode')
            cc_flag.append('arch=compute_90,code=sm_90')  # Hopper

    # Build path
    srcpath = pathlib.Path(__file__).parent.absolute()
    buildpath = srcpath / 'build'
    _create_build_dir(buildpath)

    # Helper function to build the kernels.
    def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
        return cpp_extension.load(
            name=name,
            sources=sources,
            build_directory=buildpath,
            extra_cflags=['-O3',],
            extra_cuda_cflags=['-O3',
                               '-gencode', 'arch=compute_70,code=sm_70',
                               '--use_fast_math'] + extra_cuda_flags + cc_flag,
            verbose=(args.rank == 0)
        )

    # ==============
    # Fused softmax.
    # ==============

    if args.masked_softmax_fusion:
        extra_cuda_flags = ['-U__CUDA_NO_HALF_OPERATORS__',
                            '-U__CUDA_NO_HALF_CONVERSIONS__',
                            '--expt-relaxed-constexpr',
                            '--expt-extended-lambda']
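        # The -U flags undefine the macros that suppress torch's default
        # fp16 operator/conversion overloads in device code; the --expt-*
        # flags allow extended device lambdas and relaxed constexpr usage
        # in the kernel sources.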
        
        # Upper triangular softmax.
        sources=[srcpath / 'scaled_upper_triang_masked_softmax.cpp',
                 srcpath / 'scaled_upper_triang_masked_softmax_cuda.cu']
        scaled_upper_triang_masked_softmax_cuda = _cpp_extention_load_helper(
            "scaled_upper_triang_masked_softmax_cuda",
            sources, extra_cuda_flags)

        # Masked softmax.
        sources=[srcpath / 'scaled_masked_softmax.cpp',
                 srcpath / 'scaled_masked_softmax_cuda.cu']
        scaled_masked_softmax_cuda = _cpp_extention_load_helper(
            "scaled_masked_softmax_cuda", sources, extra_cuda_flags)

        # Softmax.
        sources=[srcpath / 'scaled_softmax.cpp',
                 srcpath / 'scaled_softmax_cuda.cu']
        scaled_softmax_cuda = _cpp_extention_load_helper(
            "scaled_softmax_cuda", sources, extra_cuda_flags)

    # =================================
    # Mixed precision fused layer norm.
    # =================================

    extra_hopper_flags = ['-U__CUDA_NO_HALF_OPERATORS__',
                          '-U__CUDA_NO_HALF_CONVERSIONS__']

    extra_cuda_flags = ['-maxrregcount=50']
    sources=[srcpath / 'layer_norm_cuda.cpp',
             srcpath / 'layer_norm_cuda_kernel.cu']
    fused_mix_prec_layer_norm_cuda = _cpp_extention_load_helper(
        "fused_mix_prec_layer_norm_cuda", sources, extra_cuda_flags + extra_hopper_flags)

    # =================================
    # Fused gradient accumulation into the weight gradient computation of linear layers.
    # =================================

    if args.gradient_accumulation_fusion:
        sources=[srcpath / 'fused_weight_gradient_dense.cpp',
                 srcpath / 'fused_weight_gradient_dense.cu']
        fused_dense_cuda = _cpp_extention_load_helper(
            "fused_dense_cuda", sources, extra_hopper_flags)


def _get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                         universal_newlines=True)
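    # nvcc -V reports a line like "Cuda compilation tools, release 11.8, ...";
    # take the token after "release" and split it into major and (the first
    # digit of) minor.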
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor


def _create_build_dir(buildpath):
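    # Tolerate an already-existing directory: load() may run on multiple
    # ranks concurrently, so a failed mkdir is only reported when the
    # directory still does not exist afterwards.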
    try:
        os.mkdir(buildpath)
    except OSError:
        if not os.path.isdir(buildpath):
            print(f"Creation of the build directory {buildpath} failed")