import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys
import warnings

import torch
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


def read(*names, **kwargs):
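    """Read a file located relative to this setup.py and return its contents as a string."""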
    with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
        return fp.read()


def get_dist(pkgname):
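    """Return the installed distribution for pkgname, or None if it is not installed."""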
    try:
        return get_distribution(pkgname)
    except DistributionNotFound:
        return None


cwd = os.path.dirname(os.path.abspath(__file__))

version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
    version = f.readline().strip()

sha = "Unknown"
package_name = "torchvision"

try:
    sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
    pass

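# BUILD_VERSION overrides the version read from version.txt; otherwise the short git hash is appended.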
if os.getenv("BUILD_VERSION"):
    version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
    version += "+" + sha[:7]


def write_version_file():
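    """Write torchvision/version.py containing the resolved version, the git SHA, and the CUDA version check."""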
    version_path = os.path.join(cwd, "torchvision", "version.py")
    with open(version_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")
        f.write("from torchvision.extension import _check_cuda_version\n")
        f.write("if _check_cuda_version() > 0:\n")
        f.write("    cuda = _check_cuda_version()\n")


pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
    pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")

requirements = [
    "numpy",
    pytorch_dep,
]

# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)


def find_library(name, vision_include):
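    """Locate the header <name>.h in the given include paths, the source tree, conda prefixes, or system include dirs.

    Returns a tuple (library_found, conda_installed, include_folder, lib_folder).
    """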
    this_dir = os.path.dirname(os.path.abspath(__file__))
    build_prefix = os.environ.get("BUILD_PREFIX", None)
    is_conda_build = build_prefix is not None

    library_found = False
    conda_installed = False
    lib_folder = None
    include_folder = None
    library_header = f"{name}.h"

    # Lookup in TORCHVISION_INCLUDE or in the package file
    package_path = [os.path.join(this_dir, "torchvision")]
    for folder in vision_include + package_path:
        candidate_path = os.path.join(folder, library_header)
        library_found = os.path.exists(candidate_path)
        if library_found:
            break

    if not library_found:
        print(f"Running build on conda-build: {is_conda_build}")
        if is_conda_build:
            # Add conda headers/libraries
            if os.name == "nt":
                build_prefix = os.path.join(build_prefix, "Library")
            include_folder = os.path.join(build_prefix, "include")
            lib_folder = os.path.join(build_prefix, "lib")
            library_header_path = os.path.join(include_folder, library_header)
            library_found = os.path.isfile(library_header_path)
            conda_installed = library_found
        else:
            # Check if using Anaconda to produce wheels
            conda = shutil.which("conda")
            is_conda = conda is not None
            print(f"Running build on conda: {is_conda}")
            if is_conda:
                python_executable = sys.executable
                py_folder = os.path.dirname(python_executable)
                if os.name == "nt":
                    env_path = os.path.join(py_folder, "Library")
                else:
                    env_path = os.path.dirname(py_folder)
                lib_folder = os.path.join(env_path, "lib")
                include_folder = os.path.join(env_path, "include")
                library_header_path = os.path.join(include_folder, library_header)
                library_found = os.path.isfile(library_header_path)
                conda_installed = library_found

        if not library_found:
            if sys.platform == "linux":
                library_found = os.path.exists(f"/usr/include/{library_header}")
                library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")

    return library_found, conda_installed, include_folder, lib_folder


def get_extensions():
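    """Assemble the C++/CUDA extension modules to build.

    torchvision._C and torchvision.image are always built; torchvision.video_reader and
    torchvision.Decoder are added only when ffmpeg (and, for the latter, the NVIDIA video
    codec headers) are available.
    """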
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "torchvision", "csrc")

    main_file = (
        glob.glob(os.path.join(extensions_dir, "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))
    )
    source_cpu = (
        glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
    )

    print("Compiling extensions with following flags:")
    force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
    print(f"  FORCE_CUDA: {force_cuda}")
    force_mps = os.getenv("FORCE_MPS", "0") == "1"
    print(f"  FORCE_MPS: {force_mps}")
    debug_mode = os.getenv("DEBUG", "0") == "1"
    print(f"  DEBUG: {debug_mode}")
    use_png = os.getenv("TORCHVISION_USE_PNG", "1") == "1"
    print(f"  TORCHVISION_USE_PNG: {use_png}")
    use_jpeg = os.getenv("TORCHVISION_USE_JPEG", "1") == "1"
    print(f"  TORCHVISION_USE_JPEG: {use_jpeg}")
    use_nvjpeg = os.getenv("TORCHVISION_USE_NVJPEG", "1") == "1"
    print(f"  TORCHVISION_USE_NVJPEG: {use_nvjpeg}")
    use_ffmpeg = os.getenv("TORCHVISION_USE_FFMPEG", "1") == "1"
    print(f"  TORCHVISION_USE_FFMPEG: {use_ffmpeg}")
    use_video_codec = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"
    print(f"  TORCHVISION_USE_VIDEO_CODEC: {use_video_codec}")

    nvcc_flags = os.getenv("NVCC_FLAGS", "")
    print(f"  NVCC_FLAGS: {nvcc_flags}")

    is_rocm_pytorch = False

    if torch.__version__ >= "1.5":
        from torch.utils.cpp_extension import ROCM_HOME

        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    if is_rocm_pytorch:
        from torch.utils.hipify import hipify_python

        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
        # Copy over additional files
        for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
            shutil.copy(file, "torchvision/csrc/ops/hip")
    else:
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))

    sources = main_file + source_cpu
    extension = CppExtension

    define_macros = []

    extra_compile_args = {"cxx": []}
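    # Compile the CUDA/HIP sources when a GPU build is possible (CUDA_HOME or ROCm detected) or FORCE_CUDA=1 is set.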
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or force_cuda:
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            if nvcc_flags == "":
                nvcc_flags = []
            else:
                nvcc_flags = nvcc_flags.split(" ")
        else:
            define_macros += [("WITH_HIP", None)]
            nvcc_flags = []
        extra_compile_args["nvcc"] = nvcc_flags

    # FIXME: MPS build breaks custom ops registration, so it was disabled.
    # See https://github.com/pytorch/vision/issues/8456.
    # TODO: Fix the MPS build, remove the warning below, and restore the commented-out elif block.
    if force_mps:
        warnings.warn("MPS build is temporarily disabled!!!!")
    # elif torch.backends.mps.is_available() or force_mps:
    #     source_mps = glob.glob(os.path.join(extensions_dir, "ops", "mps", "*.mm"))
    #     sources += source_mps

    if sys.platform == "win32":
        define_macros += [("torchvision_EXPORTS", None)]
        extra_compile_args["cxx"].append("/MP")

    if debug_mode:
        print("Compiling in debug mode")
        extra_compile_args["cxx"].append("-g")
        extra_compile_args["cxx"].append("-O0")
        if "nvcc" in extra_compile_args:
            # strip any existing "-O<n>" and "-g" flags before appending the debug flags
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")
    else:
        print("Compiling with debug mode OFF")
        extra_compile_args["cxx"].append("-g0")

    sources = [os.path.join(extensions_dir, s) for s in sources]

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "torchvision._C",
            sorted(sources),
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    # ------------------- Torchvision extra extensions ------------------------
    vision_include = os.environ.get("TORCHVISION_INCLUDE", None)
    vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
    vision_include = vision_include.split(os.pathsep) if vision_include is not None else []
    vision_library = vision_library.split(os.pathsep) if vision_library is not None else []
    include_dirs += vision_include
    library_dirs = vision_library

    # Image reading extension
    image_macros = []
    image_include = [extensions_dir]
    image_library = []
    image_link_flags = []

    # Locating libPNG
    libpng = shutil.which("libpng-config")
    pngfix = shutil.which("pngfix")
    png_found = libpng is not None or pngfix is not None

    use_png = use_png and png_found
    if use_png:
        print("Found PNG library")
        if libpng is not None:
            # Linux / Mac
            min_version = "1.6.0"
            png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE)
            png_version = png_version.stdout.strip().decode("utf-8")
            png_version = parse_version(png_version)
            if png_version >= parse_version(min_version):
                print("Building torchvision with PNG image support")
                png_lib = subprocess.run([libpng, "--libdir"], stdout=subprocess.PIPE)
                png_lib = png_lib.stdout.strip().decode("utf-8")
                if "disabled" not in png_lib:
                    image_library += [png_lib]
                png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE)
                png_include = png_include.stdout.strip().decode("utf-8")
                _, png_include = png_include.split("-I")
                image_include += [png_include]
                image_link_flags.append("png")
                print(f"  libpng version: {png_version}")
                print(f"  libpng include path: {png_include}")
            else:
                print("Could not add PNG image support to torchvision:")
                print(f"  libpng minimum version {min_version}, found {png_version}")
                use_png = False
        else:
            # Windows
            png_lib = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "lib")
            png_include = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "include", "libpng16")
            image_library += [png_lib]
            image_include += [png_include]
            image_link_flags.append("libpng")
    else:
        print("Building torchvision without PNG image support")
    image_macros += [("PNG_FOUND", str(int(use_png)))]

    # Locating libjpeg
    (jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include)

    use_jpeg = use_jpeg and jpeg_found
    if use_jpeg:
        print("Building torchvision with JPEG image support")
        print(f"  libjpeg include path: {jpeg_include}")
        print(f"  libjpeg lib path: {jpeg_lib}")
        image_link_flags.append("jpeg")
        if jpeg_conda:
            image_library += [jpeg_lib]
            image_include += [jpeg_include]
    else:
        print("Building torchvision without JPEG image support")
    image_macros += [("JPEG_FOUND", str(int(use_jpeg)))]

    # Locating nvjpeg
    # Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI
    nvjpeg_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h"))
    )

    use_nvjpeg = use_nvjpeg and nvjpeg_found
    if use_nvjpeg:
        print("Building torchvision with NVJPEG image support")
        image_link_flags.append("nvjpeg")
    else:
        print("Building torchvision without NVJPEG image support")
    image_macros += [("NVJPEG_FOUND", str(int(use_nvjpeg)))]

    image_path = os.path.join(extensions_dir, "io", "image")
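    # Sources for the torchvision.image extension: C++ decoders (PNG/JPEG/GIF) plus the CUDA or hipified sources.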
    image_src = (
        glob.glob(os.path.join(image_path, "*.cpp"))
        + glob.glob(os.path.join(image_path, "cpu", "*.cpp"))
        + glob.glob(os.path.join(image_path, "cpu", "giflib", "*.c"))
    )

    if is_rocm_pytorch:
        image_src += glob.glob(os.path.join(image_path, "hip", "*.cpp"))
        # we need to exclude this in favor of the hipified source
        image_src.remove(os.path.join(image_path, "image.cpp"))
    else:
        image_src += glob.glob(os.path.join(image_path, "cuda", "*.cpp"))

    ext_modules.append(
        extension(
            "torchvision.image",
            image_src,
            include_dirs=image_include + include_dirs + [image_path],
            library_dirs=image_library + library_dirs,
            define_macros=image_macros,
            libraries=image_link_flags,
            extra_compile_args=extra_compile_args,
        )
    )

    # Locating ffmpeg
    ffmpeg_exe = shutil.which("ffmpeg")
    has_ffmpeg = ffmpeg_exe is not None
    ffmpeg_version = None
    # FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9 causes a crash.
    # FIXME: See the following GitHub issues for more details:
    # FIXME: https://github.com/pytorch/pytorch/issues/65000
    # FIXME: https://github.com/pytorch/vision/issues/3367
    if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
        has_ffmpeg = False
    if has_ffmpeg:
        try:
            # Check that ffmpeg is installed and runs properly.
            ffmpeg_version = subprocess.check_output(["ffmpeg", "-version"])
        except subprocess.CalledProcessError:
            print("Building torchvision without ffmpeg support")
            print("  Error fetching ffmpeg version, ignoring ffmpeg.")
            has_ffmpeg = False

    use_ffmpeg = use_ffmpeg and has_ffmpeg

    if use_ffmpeg:
        ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}

        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
        ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")

        gcc = os.environ.get("CC", shutil.which("gcc"))
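        # "gcc -print-multiarch" reports the multiarch tuple (e.g. x86_64-linux-gnu) used below to find distro-specific ffmpeg paths.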
        platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode("utf-8")

        if platform_tag:
            # Most probably a Debian-based distribution
            ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
            ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]

        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + include_dirs:
                full_path = os.path.join(search_path, library, "*.h")
                library_found |= len(glob.glob(full_path)) > 0

            if not library_found:
                print("Building torchvision without ffmpeg support")
                print(f"  {library} header files were not found, disabling ffmpeg support")
                use_ffmpeg = False
    else:
        print("Building torchvision without ffmpeg support")

    if use_ffmpeg:
        print("Building torchvision with ffmpeg support")
        print(f"  ffmpeg version: {ffmpeg_version}")
        print(f"  ffmpeg include path: {ffmpeg_include_dir}")
        print(f"  ffmpeg library_dir: {ffmpeg_library_dir}")

        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader")
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "decoder")
        base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video")
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]

        combined_src = video_reader_src + base_decoder_src + videoapi_src

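        # Build torchvision.video_reader from the base decoder, video reader, and video API sources.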
        ext_modules.append(
            CppExtension(
                "torchvision.video_reader",
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    extensions_dir,
                    *ffmpeg_include_dir,
                    *include_dirs,
                ],
                library_dirs=ffmpeg_library_dir + library_dirs,
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                ],
                extra_compile_args=["-std=c++17"] if os.name != "nt" else ["/std:c++17", "/MP"],
                extra_link_args=["-std=c++17" if os.name != "nt" else "/std:c++17"],
            )
        )

    # Locating video codec
    # CUDA_HOME should be set to the cuda root directory.
    # TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the location to
    # video codec header files and libraries respectively.
    video_codec_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in library_dirs])
    )

    use_video_codec = use_video_codec and video_codec_found
    if (
        use_video_codec
        and use_ffmpeg
        and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
    ):
        print("Building torchvision with video codec support")
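        # Build torchvision.Decoder: GPU video decoding linked against ffmpeg, nvcuvid, and the CUDA runtime.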
        gpu_decoder_path = os.path.join(extensions_dir, "io", "decoder", "gpu")
        gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
        cuda_libs = os.path.join(CUDA_HOME, "lib64")
        cuda_inc = os.path.join(CUDA_HOME, "include")

        ext_modules.append(
            extension(
                "torchvision.Decoder",
                gpu_decoder_src,
                include_dirs=include_dirs + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
                library_dirs=ffmpeg_library_dir + library_dirs + [cuda_libs],
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                    "nvcuvid",
                    "cuda",
                    "cudart",
                    "z",
                    "pthread",
                    "dl",
                    "nppicc",
                ],
                extra_compile_args=extra_compile_args,
            )
        )
    else:
        print("Building torchvision without video codec support")
        if (
            use_video_codec
            and use_ffmpeg
            and not any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
        ):
            print(
                "  The installed version of ffmpeg is missing the header file 'bsf.h' which is "
                "  required for GPU video decoding. Please install the latest ffmpeg from conda-forge channel:"
                "   `conda install -c conda-forge ffmpeg`."
            )

    return ext_modules


class clean(distutils.command.clean.clean):
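    """Clean build artifacts, additionally removing any files matched by the patterns in .gitignore."""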
    def run(self):
        with open(".gitignore") as f:
            ignores = f.read()
            for wildcard in filter(None, ignores.split("\n")):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # distutils' clean was an old-style class in Python 2.7, so call the base class's run() directly instead of using super().
        distutils.command.clean.clean.run(self)


if __name__ == "__main__":
    print(f"Building wheel {package_name}-{version}")

    write_version_file()

    with open("README.md") as f:
        readme = f.read()

    setup(
        # Metadata
        name=package_name,
        version=version,
        author="PyTorch Core Team",
        author_email="soumith@pytorch.org",
        url="https://github.com/pytorch/vision",
        description="image and video datasets and models for torch deep learning",
        long_description=readme,
        long_description_content_type="text/markdown",
        license="BSD",
        # Package info
        packages=find_packages(exclude=("test",)),
        package_data={package_name: ["*.dll", "*.dylib", "*.so", "prototype/datasets/_builtin/*.categories"]},
        zip_safe=False,
        install_requires=requirements,
        extras_require={
            "gdown": ["gdown>=4.7.3"],
            "scipy": ["scipy"],
        },
        ext_modules=get_extensions(),
        python_requires=">=3.8",
        cmdclass={
            "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
            "clean": clean,
        },
    )