import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys

import torch
from pkg_resources import parse_version, get_distribution, DistributionNotFound
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
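# The build can be configured through environment variables read further below, e.g.
# BUILD_VERSION, PYTORCH_VERSION, DEBUG, FORCE_CUDA, NVCC_FLAGS, WITH_CPP_MODELS_TEST,
# TORCHVISION_INCLUDE and TORCHVISION_LIBRARY.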


def read(*names, **kwargs):
    with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
        return fp.read()


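# Return the installed distribution matching pkgname, or None if it is not installed
# (used below to prefer pillow-simd over pillow when it is already present).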
def get_dist(pkgname):
    try:
        return get_distribution(pkgname)
    except DistributionNotFound:
        return None


cwd = os.path.dirname(os.path.abspath(__file__))

version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
    version = f.readline().strip()

sha = "Unknown"
package_name = "torchvision"

try:
    sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
    pass

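# BUILD_VERSION, if set, overrides the version read from version.txt; otherwise the
# short git SHA is appended as a suffix.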
if os.getenv("BUILD_VERSION"):
    version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
    version += "+" + sha[:7]


def write_version_file():
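    # Write torchvision/version.py with the resolved version and the git commit.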
    version_path = os.path.join(cwd, "torchvision", "version.py")
    with open(version_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")
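        # version.py also exposes the CUDA version used for the build (torchvision.version.cuda), if any.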
        f.write("from torchvision.extension import _check_cuda_version\n")
        f.write("if _check_cuda_version() > 0:\n")
        f.write("    cuda = _check_cuda_version()\n")


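# Pin the torch requirement to an exact version when PYTORCH_VERSION is set.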
pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
    pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")

requirements = [
    "numpy",
    "requests",
    pytorch_dep,
]

# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)


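# Look for the header <name>.h in TORCHVISION_INCLUDE, the torchvision package directory,
# the active conda environment, and (on Linux) the standard system include paths.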
def find_library(name, vision_include):
    this_dir = os.path.dirname(os.path.abspath(__file__))
    build_prefix = os.environ.get("BUILD_PREFIX", None)
    is_conda_build = build_prefix is not None

    library_found = False
    conda_installed = False
    lib_folder = None
    include_folder = None
    library_header = f"{name}.h"

    # Lookup in TORCHVISION_INCLUDE or in the package file
    package_path = [os.path.join(this_dir, "torchvision")]
    for folder in vision_include + package_path:
        candidate_path = os.path.join(folder, library_header)
        library_found = os.path.exists(candidate_path)
        if library_found:
            break

    if not library_found:
        print(f"Running build on conda-build: {is_conda_build}")
        if is_conda_build:
            # Add conda headers/libraries
            if os.name == "nt":
                build_prefix = os.path.join(build_prefix, "Library")
            include_folder = os.path.join(build_prefix, "include")
            lib_folder = os.path.join(build_prefix, "lib")
            library_header_path = os.path.join(include_folder, library_header)
            library_found = os.path.isfile(library_header_path)
            conda_installed = library_found
        else:
            # Check if using Anaconda to produce wheels
            conda = distutils.spawn.find_executable("conda")
            is_conda = conda is not None
            print(f"Running build on conda: {is_conda}")
            if is_conda:
                python_executable = sys.executable
                py_folder = os.path.dirname(python_executable)
                if os.name == "nt":
                    env_path = os.path.join(py_folder, "Library")
                else:
                    env_path = os.path.dirname(py_folder)
                lib_folder = os.path.join(env_path, "lib")
                include_folder = os.path.join(env_path, "include")
                library_header_path = os.path.join(include_folder, library_header)
                library_found = os.path.isfile(library_header_path)
                conda_installed = library_found

        if not library_found:
            if sys.platform == "linux":
                library_found = os.path.exists(f"/usr/include/{library_header}")
                library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")

    return library_found, conda_installed, include_folder, lib_folder


def get_extensions():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "torchvision", "csrc")

    main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + glob.glob(
        os.path.join(extensions_dir, "ops", "*.cpp")
    )
    source_cpu = (
        glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
    )

    is_rocm_pytorch = False

    if torch.__version__ >= "1.5":
        from torch.utils.cpp_extension import ROCM_HOME

        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    if is_rocm_pytorch:
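        # Building against ROCm: hipify the CUDA sources and compile the generated HIP files instead.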
        from torch.utils.hipify import hipify_python

        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
        # Copy over additional files
        for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
            shutil.copy(file, "torchvision/csrc/ops/hip")
    else:
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))

    source_cuda += glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))

    sources = main_file + source_cpu
    extension = CppExtension

    compile_cpp_tests = os.getenv("WITH_CPP_MODELS_TEST", "0") == "1"
    if compile_cpp_tests:
        test_dir = os.path.join(this_dir, "test")
        models_dir = os.path.join(this_dir, "torchvision", "csrc", "models")
        test_file = glob.glob(os.path.join(test_dir, "*.cpp"))
        source_models = glob.glob(os.path.join(models_dir, "*.cpp"))

        test_file = [os.path.join(test_dir, s) for s in test_file]
        source_models = [os.path.join(models_dir, s) for s in source_models]
        tests = test_file + source_models
        tests_include_dirs = [test_dir, models_dir]

    define_macros = []

    extra_compile_args = {"cxx": []}
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
        "FORCE_CUDA", "0"
    ) == "1":
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            nvcc_flags = os.getenv("NVCC_FLAGS", "")
            if nvcc_flags == "":
                nvcc_flags = []
            else:
                nvcc_flags = nvcc_flags.split(" ")
        else:
            define_macros += [("WITH_HIP", None)]
            nvcc_flags = []
        extra_compile_args["nvcc"] = nvcc_flags

    if sys.platform == "win32":
        define_macros += [("torchvision_EXPORTS", None)]
        define_macros += [("USE_PYTHON", None)]
        extra_compile_args["cxx"].append("/MP")

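    # DEBUG=1 compiles the extensions with debug symbols and without optimizations.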
    debug_mode = os.getenv("DEBUG", "0") == "1"
    if debug_mode:
        print("Compile in debug mode")
        extra_compile_args["cxx"].append("-g")
        extra_compile_args["cxx"].append("-O0")
        if "nvcc" in extra_compile_args:
            # remove any existing "-O*" and "-g" flags before appending the debug flags
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")

    sources = [os.path.join(extensions_dir, s) for s in sources]

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "torchvision._C",
            sorted(sources),
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    if compile_cpp_tests:
        ext_modules.append(
            extension(
                "torchvision._C_tests",
                tests,
                include_dirs=tests_include_dirs,
                define_macros=define_macros,
                extra_compile_args=extra_compile_args,
            )
        )

    # ------------------- Torchvision extra extensions ------------------------
    vision_include = os.environ.get("TORCHVISION_INCLUDE", None)
    vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
    vision_include = vision_include.split(os.pathsep) if vision_include is not None else []
    vision_library = vision_library.split(os.pathsep) if vision_library is not None else []
    include_dirs += vision_include
    library_dirs = vision_library

    # Image reading extension
    image_macros = []
    image_include = [extensions_dir]
    image_library = []
    image_link_flags = []

    if sys.platform == "win32":
        image_macros += [("USE_PYTHON", None)]

    # Locating libPNG
    libpng = distutils.spawn.find_executable("libpng-config")
    pngfix = distutils.spawn.find_executable("pngfix")
    png_found = libpng is not None or pngfix is not None
    print(f"PNG found: {png_found}")
    if png_found:
        if libpng is not None:
            # Linux / Mac
            png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE)
            png_version = png_version.stdout.strip().decode("utf-8")
            print(f"libpng version: {png_version}")
            png_version = parse_version(png_version)
            if png_version >= parse_version("1.6.0"):
                print("Building torchvision with PNG image support")
                png_lib = subprocess.run([libpng, "--libdir"], stdout=subprocess.PIPE)
                png_lib = png_lib.stdout.strip().decode("utf-8")
                if "disabled" not in png_lib:
                    image_library += [png_lib]
                png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE)
                png_include = png_include.stdout.strip().decode("utf-8")
                _, png_include = png_include.split("-I")
                print(f"libpng include path: {png_include}")
                image_include += [png_include]
                image_link_flags.append("png")
            else:
                print("libpng installed version is less than 1.6.0, disabling PNG support")
                png_found = False
        else:
            # Windows
            png_lib = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "lib")
            png_include = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "include", "libpng16")
            image_library += [png_lib]
            image_include += [png_include]
            image_link_flags.append("libpng")

    # Locating libjpeg
    (jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include)

    print(f"JPEG found: {jpeg_found}")
    image_macros += [("PNG_FOUND", str(int(png_found)))]
    image_macros += [("JPEG_FOUND", str(int(jpeg_found)))]
    if jpeg_found:
        print("Building torchvision with JPEG image support")
        image_link_flags.append("jpeg")
        if jpeg_conda:
            image_library += [jpeg_lib]
            image_include += [jpeg_include]

    # Locating nvjpeg
    # Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI
    nvjpeg_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h"))
    )

    print(f"NVJPEG found: {nvjpeg_found}")
    image_macros += [("NVJPEG_FOUND", str(int(nvjpeg_found)))]
    if nvjpeg_found:
        print("Building torchvision with NVJPEG image support")
        image_link_flags.append("nvjpeg")

    image_path = os.path.join(extensions_dir, "io", "image")
    image_src = (
        glob.glob(os.path.join(image_path, "*.cpp"))
        + glob.glob(os.path.join(image_path, "cpu", "*.cpp"))
        + glob.glob(os.path.join(image_path, "cuda", "*.cpp"))
    )

    if png_found or jpeg_found:
        ext_modules.append(
            extension(
                "torchvision.image",
                image_src,
                include_dirs=image_include + include_dirs + [image_path],
                library_dirs=image_library + library_dirs,
                define_macros=image_macros,
                libraries=image_link_flags,
                extra_compile_args=extra_compile_args,
            )
        )

    ffmpeg_exe = distutils.spawn.find_executable("ffmpeg")
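    # ffmpeg is needed for the torchvision.video_reader extension built below.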
    has_ffmpeg = ffmpeg_exe is not None
    # FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9
    # FIXME: causes crash. See the following GitHub issues for more details.
    # FIXME: https://github.com/pytorch/pytorch/issues/65000
    # FIXME: https://github.com/pytorch/vision/issues/3367
    if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
        has_ffmpeg = False
    if has_ffmpeg:
        try:
            # This is to check if ffmpeg is installed properly.
            subprocess.check_output(["ffmpeg", "-version"])
        except subprocess.CalledProcessError:
            print("Error fetching ffmpeg version, ignoring ffmpeg.")
            has_ffmpeg = False

    print(f"FFmpeg found: {has_ffmpeg}")

    if has_ffmpeg:
        ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}

        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
        ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")

        gcc = distutils.spawn.find_executable("gcc")
        platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode("utf-8")

        if platform_tag:
            # Most probably a Debian-based distribution
            ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
            ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]

        has_ffmpeg = True
        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + include_dirs:
                full_path = os.path.join(search_path, library, "*.h")
                library_found |= len(glob.glob(full_path)) > 0

            if not library_found:
                print(f"{library} header files were not found, disabling ffmpeg support")
                has_ffmpeg = False

    if has_ffmpeg:
        print(f"ffmpeg include path: {ffmpeg_include_dir}")
        print(f"ffmpeg library_dir: {ffmpeg_library_dir}")

        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader")
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "decoder")
        base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video")
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]

        combined_src = video_reader_src + base_decoder_src + videoapi_src

        ext_modules.append(
            CppExtension(
                "torchvision.video_reader",
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    extensions_dir,
                    *ffmpeg_include_dir,
                    *include_dirs,
                ],
                library_dirs=ffmpeg_library_dir + library_dirs,
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                ],
                extra_compile_args=["-std=c++14"] if os.name != "nt" else ["/std:c++14", "/MP"],
                extra_link_args=["-std=c++14" if os.name != "nt" else "/std:c++14"],
            )
        )

    # Locating video codec
    # CUDA_HOME should be set to the cuda root directory.
    # TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the location to
    # video codec header files and libraries respectively.
    video_codec_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in library_dirs])
    )

    print(f"video codec found: {video_codec_found}")

    if (
        video_codec_found
        and has_ffmpeg
        and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
    ):
        gpu_decoder_path = os.path.join(extensions_dir, "io", "decoder", "gpu")
        gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
        cuda_libs = os.path.join(CUDA_HOME, "lib64")
        cuda_inc = os.path.join(CUDA_HOME, "include")

        ext_modules.append(
            extension(
                "torchvision.Decoder",
                gpu_decoder_src,
                include_dirs=include_dirs + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
                library_dirs=ffmpeg_library_dir + library_dirs + [cuda_libs],
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                    "nvcuvid",
                    "cuda",
                    "cudart",
                    "z",
                    "pthread",
                    "dl",
                    "nppicc",
                ],
                extra_compile_args=extra_compile_args,
            )
        )
    else:
        print(
            "The installed version of ffmpeg is missing the header file 'bsf.h' which is "
            "required for GPU video decoding. Please install the latest ffmpeg from conda-forge channel:"
            " `conda install -c conda-forge ffmpeg`."
        )

    return ext_modules


class clean(distutils.command.clean.clean):
    def run(self):
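        # Remove files matching the patterns in .gitignore, then run the default distutils clean.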
        with open(".gitignore") as f:
            ignores = f.read()
            for wildcard in filter(None, ignores.split("\n")):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


if __name__ == "__main__":
    print(f"Building wheel {package_name}-{version}")

    write_version_file()

    with open("README.rst") as f:
        readme = f.read()

    setup(
        # Metadata
        name=package_name,
        version=version,
        author="PyTorch Core Team",
        author_email="soumith@pytorch.org",
        url="https://github.com/pytorch/vision",
        description="image and video datasets and models for torch deep learning",
        long_description=readme,
        license="BSD",
        # Package info
        packages=find_packages(exclude=("test",)),
        package_data={package_name: ["*.dll", "*.dylib", "*.so", "prototype/datasets/_builtin/*.categories"]},
        zip_safe=False,
        install_requires=requirements,
        extras_require={
            "scipy": ["scipy"],
        },
        ext_modules=get_extensions(),
        python_requires=">=3.7",
        cmdclass={
            "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
            "clean": clean,
        },
    )