import importlib
import os
import time
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List

from .utils import check_cuda_availability, check_system_pytorch_cuda_match, print_rank_0

class Builder(ABC):
    """
    Builder is the base class to build extensions for PyTorch.

    Args:
        name (str): the name of the kernel to be built
        prebuilt_import_path (str): the path where the extension is installed during pip install
    """

    def __init__(self, name: str, prebuilt_import_path: str):
        self.name = name
        self.prebuilt_import_path = prebuilt_import_path
        self.version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']

        # we store the op as an attribute to avoid repeated building and loading
        self.cached_op_module = None

        assert prebuilt_import_path.startswith('colossalai._C'), \
            f'The prebuilt_import_path should start with colossalai._C, but got {self.prebuilt_import_path}'

    def relative_to_abs_path(self, code_path: str) -> str:
        """
        This function takes in a path relative to the colossalai root directory and returns the absolute path.
        """
        op_builder_module_path = Path(__file__).parent

        # if we install from source
        # the current file path will be op_builder/builder.py
        # if we install via pip install colossalai
        # the current file path will be colossalai/kernel/op_builder/builder.py
        # this is because that the op_builder inside colossalai is a symlink
        # this symlink will be replaced with actual files if we install via pypi
        # thus we cannot tell the colossalai root directory by checking whether the op_builder
        # is a symlink, we can only tell whether it is inside or outside colossalai
        if str(op_builder_module_path).endswith('colossalai/kernel/op_builder'):
            root_path = op_builder_module_path.parent.parent
        else:
            root_path = op_builder_module_path.parent.joinpath('colossalai')

        code_abs_path = root_path.joinpath(code_path)
        return str(code_abs_path)

    def get_cuda_home_include(self):
        """
        return include path inside the cuda home.
        """
        from torch.utils.cpp_extension import CUDA_HOME
        if CUDA_HOME is None:
            raise RuntimeError("CUDA_HOME is None, please set CUDA_HOME to compile C++/CUDA kernels in ColossalAI.")
        cuda_include = os.path.join(CUDA_HOME, "include")
        return cuda_include

    def csrc_abs_path(self, path):
        """
        Return the absolute path of a file inside the colossalai csrc directory.
        """
        return os.path.join(self.relative_to_abs_path('kernel/cuda_native/csrc'), path)

    # functions must be overridden begin
    @abstractmethod
    def sources_files(self) -> List[str]:
        """
        This function should return a list of source files for extensions.
        """
        raise NotImplementedError

    @abstractmethod
    def include_dirs(self) -> List[str]:
        """
        This function should return a list of include files for extensions.
        """
        raise NotImplementedError

    @abstractmethod
    def cxx_flags(self) -> List[str]:
        """
        This function should return a list of cxx compilation flags for extensions.
        """
        raise NotImplementedError

    @abstractmethod
    def nvcc_flags(self) -> List[str]:
        """
        This function should return a list of nvcc compilation flags for extensions.
        """
        raise NotImplementedError

    # functions must be overridden over

    def strip_empty_entries(self, args):
        '''
        Drop any empty strings from the list of compile and link flags
        '''
        return [x for x in args if len(x) > 0]

    def import_op(self):
        """
        This function will import the op module by its string name.
        """
        return importlib.import_module(self.prebuilt_import_path)

    def check_runtime_build_environment(self):
        """
        Check whether the system environment is ready for extension compilation.

        Raises:
            ModuleNotFoundError: if PyTorch is not installed
            RuntimeError: if CUDA_HOME is not set or CUDA is not available
        """
        try:
            import torch
            from torch.utils.cpp_extension import CUDA_HOME
            TORCH_AVAILABLE = True
        except ImportError:
            TORCH_AVAILABLE = False
            CUDA_HOME = None

        if not TORCH_AVAILABLE:
            raise ModuleNotFoundError(
                "PyTorch is not found. You need to install PyTorch first in order to build CUDA extensions")

        if CUDA_HOME is None:
            raise RuntimeError(
                "CUDA_HOME is not found. You need to export CUDA_HOME environment variable or install CUDA Toolkit first in order to build CUDA extensions"
            )

        # make sure CUDA is available for compilation during
        cuda_available = check_cuda_availability()
        if not cuda_available:
            raise RuntimeError("CUDA is not available on your system as torch.cuda.is_available() returns False.")

        # make sure system CUDA and pytorch CUDA match, an error will raised inside the function if not
        check_system_pytorch_cuda_match(CUDA_HOME)

    def load(self, verbose=True):
        """
        load the kernel during runtime. If the kernel is not built during pip install, it will build the kernel.
        If the kernel is built during runtime, it will be stored in `~/.cache/colossalai/torch_extensions/`. If the
        kernel is built during pip install, it can be accessed through `colossalai._C`.

        Warning: do not load this kernel repeatedly during model execution as it could slow down the training process.

        Args:
            verbose (bool, optional): show detailed info. Defaults to True.
        """
        # if the kernel has been compiled and cached, we directly use it
        if self.cached_op_module is not None:
            return self.cached_op_module

        try:
            # if the kernel has been pre-built during installation
            # we just directly import it
            op_module = self.import_op()
            if verbose:
                print_rank_0(
                    f"[extension] OP {self.prebuilt_import_path} has been compiled ahead of time, skip building.")
        except ImportError:
            # check environment
            self.check_runtime_build_environment()

            # time the kernel compilation
            start_build = time.time()

            # construct the build directory
            import torch
            from torch.utils.cpp_extension import load
            torch_version_major = torch.__version__.split('.')[0]
            torch_version_minor = torch.__version__.split('.')[1]
            torch_cuda_version = torch.version.cuda
            home_directory = os.path.expanduser('~')
            extension_directory = f".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_cu{torch_cuda_version}"
            build_directory = os.path.join(home_directory, extension_directory)
            Path(build_directory).mkdir(parents=True, exist_ok=True)

            if verbose:
                print_rank_0(f"[extension] Compiling or loading the JIT-built {self.name} kernel during runtime now")

            # load the kernel
            # strip empty entries from all flag lists for consistency with self.builder()
            op_module = load(name=self.name,
                             sources=self.strip_empty_entries(self.sources_files()),
                             extra_include_paths=self.strip_empty_entries(self.include_dirs()),
                             extra_cflags=self.strip_empty_entries(self.cxx_flags()),
                             extra_cuda_cflags=self.strip_empty_entries(self.nvcc_flags()),
                             extra_ldflags=[],
                             build_directory=build_directory,
                             verbose=verbose)

            build_duration = time.time() - start_build

            # log jit compilation time
            if verbose:
                print_rank_0(f"[extension] Time to compile or load {self.name} op: {build_duration} seconds")

        # cache the built/loaded kernel
        self.cached_op_module = op_module

        return op_module

    def builder(self) -> 'CUDAExtension':
        """
        get a CUDAExtension instance used for setup.py
        """
        from torch.utils.cpp_extension import CUDAExtension

        return CUDAExtension(name=self.prebuilt_import_path,
                             sources=self.strip_empty_entries(self.sources_files()),
                             include_dirs=self.strip_empty_entries(self.include_dirs()),
                             extra_compile_args={
                                 'cxx': self.strip_empty_entries(self.cxx_flags()),
                                 'nvcc': self.strip_empty_entries(self.nvcc_flags())
                             })