import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys

import torch
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


def read(*names, **kwargs):
    with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
        return fp.read()


def get_dist(pkgname):
    try:
        return get_distribution(pkgname)
    except DistributionNotFound:
        return None


cwd = os.path.dirname(os.path.abspath(__file__))

version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
    version = f.readline().strip()
sha = "Unknown"
package_name = "torchvision"

try:
    sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
    pass

if os.getenv("BUILD_VERSION"):
    version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
    version += "+" + sha[:7]


def write_version_file():
    version_path = os.path.join(cwd, "torchvision", "version.py")
    with open(version_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")
        f.write("from torchvision.extension import _check_cuda_version\n")
        f.write("if _check_cuda_version() > 0:\n")
        f.write("    cuda = _check_cuda_version()\n")


pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
    pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")

requirements = [
    "numpy",
    pytorch_dep,
]

# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)
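# The appended requirement therefore resolves to e.g. "pillow >= 5.3.0, !=8.3.*",
# or the "pillow-simd" equivalent when pillow-simd is already installed.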


def find_library(name, vision_include):
    this_dir = os.path.dirname(os.path.abspath(__file__))
    build_prefix = os.environ.get("BUILD_PREFIX", None)
    is_conda_build = build_prefix is not None

    library_found = False
    conda_installed = False
    lib_folder = None
    include_folder = None
    library_header = f"{name}.h"

    # Lookup in TORCHVISION_INCLUDE or in the package file
    package_path = [os.path.join(this_dir, "torchvision")]
    for folder in vision_include + package_path:
        candidate_path = os.path.join(folder, library_header)
        library_found = os.path.exists(candidate_path)
        if library_found:
            break

    if not library_found:
        print(f"Running build on conda-build: {is_conda_build}")
        if is_conda_build:
            # Add conda headers/libraries
            if os.name == "nt":
                build_prefix = os.path.join(build_prefix, "Library")
            include_folder = os.path.join(build_prefix, "include")
            lib_folder = os.path.join(build_prefix, "lib")
            library_header_path = os.path.join(include_folder, library_header)
            library_found = os.path.isfile(library_header_path)
            conda_installed = library_found
        else:
            # Check if using Anaconda to produce wheels
            conda = shutil.which("conda")
            is_conda = conda is not None
            print(f"Running build on conda: {is_conda}")
            if is_conda:
                python_executable = sys.executable
                py_folder = os.path.dirname(python_executable)
                if os.name == "nt":
                    env_path = os.path.join(py_folder, "Library")
                else:
                    env_path = os.path.dirname(py_folder)
                lib_folder = os.path.join(env_path, "lib")
                include_folder = os.path.join(env_path, "include")
                library_header_path = os.path.join(include_folder, library_header)
                library_found = os.path.isfile(library_header_path)
                conda_installed = library_found

        if not library_found:
            if sys.platform == "linux":
                library_found = os.path.exists(f"/usr/include/{library_header}")
                library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")

    return library_found, conda_installed, include_folder, lib_folder
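# Illustrative usage (this mirrors the libjpeg lookup in get_extensions() below,
# which searches for a "jpeglib.h" header):
#   found, from_conda, include_dir, lib_dir = find_library("jpeglib", vision_include)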


def get_extensions():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "torchvision", "csrc")

    main_file = (
        glob.glob(os.path.join(extensions_dir, "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))
    )
    source_cpu = (
        glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
    )
    source_mps = glob.glob(os.path.join(extensions_dir, "ops", "mps", "*.mm"))

    print("Compiling extensions with following flags:")
    force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
    print(f"  FORCE_CUDA: {force_cuda}")
    force_mps = os.getenv("FORCE_MPS", "0") == "1"
    print(f"  FORCE_MPS: {force_mps}")
    debug_mode = os.getenv("DEBUG", "0") == "1"
    print(f"  DEBUG: {debug_mode}")
    use_png = os.getenv("TORCHVISION_USE_PNG", "1") == "1"
    print(f"  TORCHVISION_USE_PNG: {use_png}")
    use_jpeg = os.getenv("TORCHVISION_USE_JPEG", "1") == "1"
    print(f"  TORCHVISION_USE_JPEG: {use_jpeg}")
    use_nvjpeg = os.getenv("TORCHVISION_USE_NVJPEG", "1") == "1"
    print(f"  TORCHVISION_USE_NVJPEG: {use_nvjpeg}")
    use_ffmpeg = os.getenv("TORCHVISION_USE_FFMPEG", "1") == "1"
    print(f"  TORCHVISION_USE_FFMPEG: {use_ffmpeg}")
    use_video_codec = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"
    print(f"  TORCHVISION_USE_VIDEO_CODEC: {use_video_codec}")

    nvcc_flags = os.getenv("NVCC_FLAGS", "")
    print(f"  NVCC_FLAGS: {nvcc_flags}")

    is_rocm_pytorch = False

    # compare versions robustly; a plain string comparison misorders e.g. "1.13" vs "1.5"
    if parse_version(torch.__version__) >= parse_version("1.5"):
        from torch.utils.cpp_extension import ROCM_HOME

        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    if is_rocm_pytorch:
        from torch.utils.hipify import hipify_python

        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
        # Copy over additional files
        for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
            shutil.copy(file, "torchvision/csrc/ops/hip")
    else:
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))

    sources = main_file + source_cpu
    extension = CppExtension

    define_macros = []

    extra_compile_args = {"cxx": []}
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or force_cuda:
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            if nvcc_flags == "":
                nvcc_flags = []
            else:
                nvcc_flags = nvcc_flags.split(" ")
        else:
            define_macros += [("WITH_HIP", None)]
            nvcc_flags = []
        extra_compile_args["nvcc"] = nvcc_flags
    elif torch.backends.mps.is_available() or force_mps:
        sources += source_mps

    if sys.platform == "win32":
        define_macros += [("torchvision_EXPORTS", None)]
        extra_compile_args["cxx"].append("/MP")

    if debug_mode:
        print("Compiling in debug mode")
        extra_compile_args["cxx"].append("-g")
        extra_compile_args["cxx"].append("-O0")
        if "nvcc" in extra_compile_args:
            # strip any existing "-O<n>" and "-g" flags before appending our own debug flags
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")
    else:
        print("Compiling with debug mode OFF")
        extra_compile_args["cxx"].append("-g0")

    sources = [os.path.join(extensions_dir, s) for s in sources]

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "torchvision._C",
            sorted(sources),
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    # ------------------- Torchvision extra extensions ------------------------
    vision_include = os.environ.get("TORCHVISION_INCLUDE", None)
    vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
    vision_include = vision_include.split(os.pathsep) if vision_include is not None else []
    vision_library = vision_library.split(os.pathsep) if vision_library is not None else []
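    # Both variables are os.pathsep-separated path lists, e.g. on Linux
    # (illustrative values):
    #   TORCHVISION_INCLUDE=/opt/video_codec_sdk/include:/usr/local/include
    #   TORCHVISION_LIBRARY=/opt/video_codec_sdk/lib:/usr/local/lib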
    include_dirs += vision_include
    library_dirs = vision_library

    # Image reading extension
    image_macros = []
    image_include = [extensions_dir]
    image_library = []
    image_link_flags = []

    # Locating libPNG
    libpng = shutil.which("libpng-config")
    pngfix = shutil.which("pngfix")
    png_found = libpng is not None or pngfix is not None

    use_png = use_png and png_found
    if use_png:
        print("Found PNG library")
        if libpng is not None:
            # Linux / Mac
            min_version = "1.6.0"
            png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE)
            png_version = png_version.stdout.strip().decode("utf-8")
            png_version = parse_version(png_version)
            if png_version >= parse_version(min_version):
                print("Building torchvision with PNG image support")
                png_lib = subprocess.run([libpng, "--libdir"], stdout=subprocess.PIPE)
                png_lib = png_lib.stdout.strip().decode("utf-8")
                if "disabled" not in png_lib:
                    image_library += [png_lib]
                png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE)
                png_include = png_include.stdout.strip().decode("utf-8")
                _, png_include = png_include.split("-I")
                image_include += [png_include]
                image_link_flags.append("png")
                print(f"  libpng version: {png_version}")
                print(f"  libpng include path: {png_include}")
            else:
                print("Could not add PNG image support to torchvision:")
                print(f"  libpng minimum version {min_version}, found {png_version}")
                use_png = False
        else:
            # Windows
            png_lib = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "lib")
            png_include = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "include", "libpng16")
            image_library += [png_lib]
            image_include += [png_include]
            image_link_flags.append("libpng")
    else:
        print("Building torchvision without PNG image support")
    image_macros += [("PNG_FOUND", str(int(use_png)))]

    # Locating libjpeg
    (jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include)

    use_jpeg = use_jpeg and jpeg_found
    if use_jpeg:
        print("Building torchvision with JPEG image support")
        print(f"  libjpeg include path: {jpeg_include}")
        print(f"  libjpeg lib path: {jpeg_lib}")
        image_link_flags.append("jpeg")
        if jpeg_conda:
            image_library += [jpeg_lib]
            image_include += [jpeg_include]
    else:
        print("Building torchvision without JPEG image support")
    image_macros += [("JPEG_FOUND", str(int(use_jpeg)))]

    # Locating nvjpeg
    # Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI
    nvjpeg_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h"))
    )

    use_nvjpeg = use_nvjpeg and nvjpeg_found
    if use_nvjpeg:
        print("Building torchvision with NVJPEG image support")
        image_link_flags.append("nvjpeg")
    else:
        print("Building torchvision without NVJPEG image support")
    image_macros += [("NVJPEG_FOUND", str(int(use_nvjpeg)))]

    image_path = os.path.join(extensions_dir, "io", "image")
    image_src = (
        glob.glob(os.path.join(image_path, "*.cpp"))
        + glob.glob(os.path.join(image_path, "cpu", "*.cpp"))
        + glob.glob(os.path.join(image_path, "cpu", "giflib", "*.c"))
    )

    if is_rocm_pytorch:
        image_src += glob.glob(os.path.join(image_path, "hip", "*.cpp"))
        # we need to exclude this in favor of the hipified source
        image_src.remove(os.path.join(image_path, "image.cpp"))
    else:
        image_src += glob.glob(os.path.join(image_path, "cuda", "*.cpp"))

    ext_modules.append(
        extension(
            "torchvision.image",
            image_src,
            include_dirs=image_include + include_dirs + [image_path],
            library_dirs=image_library + library_dirs,
            define_macros=image_macros,
            libraries=image_link_flags,
            extra_compile_args=extra_compile_args,
        )
    )

    # Locating ffmpeg
    ffmpeg_exe = shutil.which("ffmpeg")
    has_ffmpeg = ffmpeg_exe is not None
    ffmpeg_version = None
    # FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9
    # FIXME: causes a crash. See the following GitHub issues for more details.
    # FIXME: https://github.com/pytorch/pytorch/issues/65000
    # FIXME: https://github.com/pytorch/vision/issues/3367
    if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
        has_ffmpeg = False
    if has_ffmpeg:
        try:
            # This is to check if ffmpeg is installed properly.
            ffmpeg_version = subprocess.check_output(["ffmpeg", "-version"])
        except subprocess.CalledProcessError:
            print("Building torchvision without ffmpeg support")
            print("  Error fetching ffmpeg version, ignoring ffmpeg.")
            has_ffmpeg = False

    use_ffmpeg = use_ffmpeg and has_ffmpeg

    if use_ffmpeg:
        ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}

        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
        ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")

        gcc = os.environ.get("CC", shutil.which("gcc"))
        platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode("utf-8")
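        # On Debian/Ubuntu toolchains `gcc -print-multiarch` typically prints a
        # triplet such as "x86_64-linux-gnu" (illustrative), so the ffmpeg headers
        # and libraries are also searched under those multiarch subdirectories.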

        if platform_tag:
            # Most probably a Debian-based distribution
            ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
            ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]

        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + include_dirs:
                full_path = os.path.join(search_path, library, "*.h")
                library_found |= len(glob.glob(full_path)) > 0

            if not library_found:
                print("Building torchvision without ffmpeg support")
                print(f"  {library} header files were not found, disabling ffmpeg support")
                use_ffmpeg = False
    else:
        print("Building torchvision without ffmpeg support")

    if use_ffmpeg:
        print("Building torchvision with ffmpeg support")
        print(f"  ffmpeg version: {ffmpeg_version}")
        print(f"  ffmpeg include path: {ffmpeg_include_dir}")
        print(f"  ffmpeg library_dir: {ffmpeg_library_dir}")

        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader")
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "decoder")
        base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video")
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]

        combined_src = video_reader_src + base_decoder_src + videoapi_src

        ext_modules.append(
            CppExtension(
                "torchvision.video_reader",
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    extensions_dir,
                    *ffmpeg_include_dir,
                    *include_dirs,
                ],
                library_dirs=ffmpeg_library_dir + library_dirs,
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                ],
                extra_compile_args=["-std=c++17"] if os.name != "nt" else ["/std:c++17", "/MP"],
                extra_link_args=["-std=c++17" if os.name != "nt" else "/std:c++17"],
            )
        )

    # Locating video codec
    # CUDA_HOME should be set to the cuda root directory.
    # TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the location to
    # video codec header files and libraries respectively.
    video_codec_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in library_dirs])
    )

    use_video_codec = use_video_codec and video_codec_found
    if (
        use_video_codec
        and use_ffmpeg
        and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
    ):
        print("Building torchvision with video codec support")
        gpu_decoder_path = os.path.join(extensions_dir, "io", "decoder", "gpu")
        gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
        cuda_libs = os.path.join(CUDA_HOME, "lib64")
        cuda_inc = os.path.join(CUDA_HOME, "include")

        ext_modules.append(
            extension(
                "torchvision.Decoder",
                gpu_decoder_src,
                include_dirs=include_dirs + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
                library_dirs=ffmpeg_library_dir + library_dirs + [cuda_libs],
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                    "nvcuvid",
                    "cuda",
                    "cudart",
                    "z",
                    "pthread",
                    "dl",
                    "nppicc",
                ],
                extra_compile_args=extra_compile_args,
            )
        )
    else:
        print("Building torchvision without video codec support")
        if (
            use_video_codec
            and use_ffmpeg
            and not any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
        ):
            print(
                "  The installed version of ffmpeg is missing the header file 'bsf.h' which is "
                "  required for GPU video decoding. Please install the latest ffmpeg from conda-forge channel:"
                "   `conda install -c conda-forge ffmpeg`."
            )

    return ext_modules


class clean(distutils.command.clean.clean):
    def run(self):
        with open(".gitignore") as f:
            ignores = f.read()
            for wildcard in filter(None, ignores.split("\n")):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


if __name__ == "__main__":
    print(f"Building wheel {package_name}-{version}")

    write_version_file()

    with open("README.md") as f:
        readme = f.read()

    setup(
        # Metadata
        name=package_name,
        version=version,
        author="PyTorch Core Team",
        author_email="soumith@pytorch.org",
        url="https://github.com/pytorch/vision",
        description="image and video datasets and models for torch deep learning",
        long_description=readme,
        long_description_content_type="text/markdown",
        license="BSD",
        # Package info
        packages=find_packages(exclude=("test",)),
        package_data={package_name: ["*.dll", "*.dylib", "*.so"]},
        zip_safe=False,
        install_requires=requirements,
        extras_require={
            "gdown": ["gdown>=4.7.3"],
            "scipy": ["scipy"],
        },
        ext_modules=get_extensions(),
        python_requires=">=3.8",
        cmdclass={
            "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
            "clean": clean,
        },
    )