# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""PyTorch related extensions."""
import os
from pathlib import Path

import setuptools

from .utils import (
    all_files_in_dir,
    cuda_archs,
    cuda_version,
)


def setup_pytorch_extension(
    csrc_source_files,
    csrc_header_files,
    common_header_files,
) -> setuptools.Extension:
    """Setup CUDA extension for PyTorch support"""

    # Source files
    csrc_source_files = Path(csrc_source_files)
    extensions_dir = csrc_source_files / "extensions"
    sources = [
        csrc_source_files / "common.cpp",
    ] + all_files_in_dir(extensions_dir)

    # Header files
    include_dirs = [
        common_header_files,
        common_header_files / "common",
        common_header_files / "common" / "include",
        csrc_header_files,
    ]

    # Compiler flags
    cxx_flags = [
        "-O3",
        "-fvisibility=hidden",
    ]
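    # The -U flags undo the __CUDA_NO_*_OPERATORS__/__CUDA_NO_*_CONVERSIONS__
    # macros that PyTorch's extension build defines by default, re-enabling
    # the native CUDA half/bfloat16 operators and conversions in these kernels.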
    nvcc_flags = [
        "-O3",
        "-U__CUDA_NO_HALF_OPERATORS__",
        "-U__CUDA_NO_HALF_CONVERSIONS__",
        "-U__CUDA_NO_BFLOAT16_OPERATORS__",
        "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
        "-U__CUDA_NO_BFLOAT162_OPERATORS__",
        "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
        "--expt-relaxed-constexpr",
        "--expt-extended-lambda",
        "--use_fast_math",
    ]

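    # cuda_archs() is expected to yield a semicolon-separated list of compute
    # capabilities, e.g. "70;80;90" (it is split on ";" below).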
    cuda_architectures = cuda_archs()

    if "70" in cuda_architectures:
        nvcc_flags.extend(["-gencode", "arch=compute_70,code=sm_70"])

    # Version-dependent CUDA options
    try:
        version = cuda_version()
    except FileNotFoundError:
        print("Could not determine CUDA Toolkit version")
    else:
        if version < (12, 0):
            raise RuntimeError("Transformer Engine requires CUDA 12.0 or newer")
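        # Let nvcc parallelize compilation across this many threads per job;
        # the --threads option is supported by all CUDA 12.x toolkits, which
        # the version check above guarantees.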
        nvcc_flags.extend(
            (
                "--threads",
                os.getenv("NVTE_BUILD_THREADS_PER_JOB", "1"),
            )
        )

        for arch in cuda_architectures.split(";"):
            if arch == "70":
                continue  # Already handled
            nvcc_flags.extend(["-gencode", f"arch=compute_{arch},code=sm_{arch}"])

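    # Optional: build the Userbuffers communication backend against MPI.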
    if bool(int(os.getenv("NVTE_UB_WITH_MPI", "0"))):
        assert (
            os.getenv("MPI_HOME") is not None
        ), "MPI_HOME=/path/to/mpi must be set when compiling with NVTE_UB_WITH_MPI=1!"
        mpi_path = Path(os.getenv("MPI_HOME"))
        include_dirs.append(mpi_path / "include")
        cxx_flags.append("-DNVTE_UB_WITH_MPI")
        nvcc_flags.append("-DNVTE_UB_WITH_MPI")

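    # Optional: link against NVSHMEM for GPU-initiated communication.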
    library_dirs = []
    libraries = []
    if bool(int(os.getenv("NVTE_ENABLE_NVSHMEM", 0))):
        assert (
            os.getenv("NVSHMEM_HOME") is not None
        ), "NVSHMEM_HOME must be set when compiling with NVTE_ENABLE_NVSHMEM=1"
        nvshmem_home = Path(os.getenv("NVSHMEM_HOME"))
        include_dirs.append(nvshmem_home / "include")
        library_dirs.append(nvshmem_home / "lib")
        libraries.append("nvshmem_host")
        cxx_flags.append("-DNVTE_ENABLE_NVSHMEM")
        nvcc_flags.append("-DNVTE_ENABLE_NVSHMEM")

    # Construct PyTorch CUDA extension
    sources = [str(path) for path in sources]
    include_dirs = [str(path) for path in include_dirs]
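    # Deferred import so this module can be imported before torch is installed.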
    from torch.utils.cpp_extension import CUDAExtension

    return CUDAExtension(
        name="transformer_engine_torch",
        sources=sources,
        include_dirs=include_dirs,
        extra_compile_args={
            "cxx": cxx_flags,
            "nvcc": nvcc_flags,
        },
        libraries=[str(lib) for lib in libraries],
        library_dirs=[str(lib_dir) for lib_dir in library_dirs],
    )
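

# A minimal usage sketch, with stand-in paths (the real setup.py supplies the
# actual source/header locations). Note that csrc_header_files and
# common_header_files should be Path objects, since the function joins them
# with the "/" operator:
#
#     from pathlib import Path
#
#     ext = setup_pytorch_extension(
#         csrc_source_files=Path("transformer_engine/pytorch/csrc"),
#         csrc_header_files=Path("transformer_engine/pytorch/csrc"),
#         common_header_files=Path("transformer_engine"),
#     )
#     setuptools.setup(ext_modules=[ext])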