import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys

import torch
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


def read(*names, **kwargs):
    with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
        return fp.read()


def get_dist(pkgname):
    try:
        return get_distribution(pkgname)
    except DistributionNotFound:
        return None


cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
    version = f.readline().strip()
sha = "Unknown"
package_name = "torchvision"

try:
    sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
    pass

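# BUILD_VERSION, when set, pins the package version exactly; otherwise the short git SHA
# is appended to the version read from version.txt as a local version suffix.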
if os.getenv("BUILD_VERSION"):
    version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
    version += "+" + sha[:7]


def write_version_file():
    version_path = os.path.join(cwd, "torchvision", "version.py")
    with open(version_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")
        f.write("from torchvision.extension import _check_cuda_version\n")
        f.write("if _check_cuda_version() > 0:\n")
        f.write("    cuda = _check_cuda_version()\n")


pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
    pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")

requirements = [
    "numpy",
    pytorch_dep,
]

# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)


def find_library(name, vision_include):
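    """Locate the header '<name>.h' for an optional native dependency.

    Searches, in order: the paths in ``vision_include``, the torchvision package
    directory, a conda-build / conda environment prefix, and finally the system
    include directories on Linux. Returns a tuple
    ``(library_found, conda_installed, include_folder, lib_folder)``.
    """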
    this_dir = os.path.dirname(os.path.abspath(__file__))
    build_prefix = os.environ.get("BUILD_PREFIX", None)
    is_conda_build = build_prefix is not None

    library_found = False
    conda_installed = False
    lib_folder = None
    include_folder = None
    library_header = f"{name}.h"

    # Lookup in TORCHVISION_INCLUDE or in the package file
    package_path = [os.path.join(this_dir, "torchvision")]
    for folder in vision_include + package_path:
        candidate_path = os.path.join(folder, library_header)
        library_found = os.path.exists(candidate_path)
        if library_found:
            break

    if not library_found:
        print(f"Running build on conda-build: {is_conda_build}")
        if is_conda_build:
            # Add conda headers/libraries
            if os.name == "nt":
                build_prefix = os.path.join(build_prefix, "Library")
            include_folder = os.path.join(build_prefix, "include")
            lib_folder = os.path.join(build_prefix, "lib")
            library_header_path = os.path.join(include_folder, library_header)
            library_found = os.path.isfile(library_header_path)
            conda_installed = library_found
        else:
            # Check if using Anaconda to produce wheels
            conda = shutil.which("conda")
            is_conda = conda is not None
            print(f"Running build on conda: {is_conda}")
            if is_conda:
                python_executable = sys.executable
                py_folder = os.path.dirname(python_executable)
                if os.name == "nt":
                    env_path = os.path.join(py_folder, "Library")
                else:
                    env_path = os.path.dirname(py_folder)
                lib_folder = os.path.join(env_path, "lib")
                include_folder = os.path.join(env_path, "include")
                library_header_path = os.path.join(include_folder, library_header)
                library_found = os.path.isfile(library_header_path)
                conda_installed = library_found

        if not library_found:
            if sys.platform == "linux":
                library_found = os.path.exists(f"/usr/include/{library_header}")
                library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")

    return library_found, conda_installed, include_folder, lib_folder


def get_extensions():
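    """Assemble the list of C++/CUDA extension modules passed to setup().

    The build is driven by the environment variables printed below (FORCE_CUDA, FORCE_MPS,
    DEBUG, NVCC_FLAGS and the TORCHVISION_USE_* switches) plus auto-detection of the
    optional native dependencies (libpng, libjpeg, nvjpeg, ffmpeg, NVIDIA video codec).
    """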
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "torchvision", "csrc")

    main_file = (
        glob.glob(os.path.join(extensions_dir, "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))
    )
    source_cpu = (
        glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
    )
    source_mps = glob.glob(os.path.join(extensions_dir, "ops", "mps", "*.mm"))

    print("Compiling extensions with following flags:")
    force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
    print(f"  FORCE_CUDA: {force_cuda}")
    force_mps = os.getenv("FORCE_MPS", "0") == "1"
    print(f"  FORCE_MPS: {force_mps}")
    debug_mode = os.getenv("DEBUG", "0") == "1"
    print(f"  DEBUG: {debug_mode}")
    use_png = os.getenv("TORCHVISION_USE_PNG", "1") == "1"
    print(f"  TORCHVISION_USE_PNG: {use_png}")
    use_jpeg = os.getenv("TORCHVISION_USE_JPEG", "1") == "1"
    print(f"  TORCHVISION_USE_JPEG: {use_jpeg}")
    use_nvjpeg = os.getenv("TORCHVISION_USE_NVJPEG", "1") == "1"
    print(f"  TORCHVISION_USE_NVJPEG: {use_nvjpeg}")
    use_ffmpeg = os.getenv("TORCHVISION_USE_FFMPEG", "1") == "1"
    print(f"  TORCHVISION_USE_FFMPEG: {use_ffmpeg}")
    use_video_codec = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"
    print(f"  TORCHVISION_USE_VIDEO_CODEC: {use_video_codec}")

    nvcc_flags = os.getenv("NVCC_FLAGS", "")
    print(f"  NVCC_FLAGS: {nvcc_flags}")

    is_rocm_pytorch = False

    if torch.__version__ >= "1.5":
        from torch.utils.cpp_extension import ROCM_HOME

        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    if is_rocm_pytorch:
        from torch.utils.hipify import hipify_python

        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
        # Copy over additional files
        for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
            shutil.copy(file, "torchvision/csrc/ops/hip")
    else:
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))

    sources = main_file + source_cpu
    extension = CppExtension

    define_macros = []

    extra_compile_args = {"cxx": []}
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or force_cuda:
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            if nvcc_flags == "":
                nvcc_flags = []
            else:
                nvcc_flags = nvcc_flags.split(" ")
        else:
            define_macros += [("WITH_HIP", None)]
            nvcc_flags = []
        extra_compile_args["nvcc"] = nvcc_flags
    elif torch.backends.mps.is_available() or force_mps:
        sources += source_mps

    if sys.platform == "win32":
        define_macros += [("torchvision_EXPORTS", None)]
        define_macros += [("USE_PYTHON", None)]
        extra_compile_args["cxx"].append("/MP")

    if debug_mode:
        print("Compiling in debug mode")
        extra_compile_args["cxx"].append("-g")
        extra_compile_args["cxx"].append("-O0")
        if "nvcc" in extra_compile_args:
            # remove any existing "-O*" and "-g" flags before appending the debug flags
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")
    else:
        print("Compiling with debug mode OFF")
        extra_compile_args["cxx"].append("-g0")

    sources = [os.path.join(extensions_dir, s) for s in sources]

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "torchvision._C",
            sorted(sources),
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    # ------------------- Torchvision extra extensions ------------------------
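    # TORCHVISION_INCLUDE / TORCHVISION_LIBRARY hold os.pathsep-separated lists of extra
    # header and library search paths for the optional image and video extensions.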
    vision_include = os.environ.get("TORCHVISION_INCLUDE", None)
    vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
    vision_include = vision_include.split(os.pathsep) if vision_include is not None else []
    vision_library = vision_library.split(os.pathsep) if vision_library is not None else []
    include_dirs += vision_include
    library_dirs = vision_library

    # Image reading extension
    image_macros = []
    image_include = [extensions_dir]
    image_library = []
    image_link_flags = []

    if sys.platform == "win32":
        image_macros += [("USE_PYTHON", None)]

    # Locating libPNG
    libpng = shutil.which("libpng-config")
    pngfix = shutil.which("pngfix")
    png_found = libpng is not None or pngfix is not None

    use_png = use_png and png_found
    if use_png:
        print("Found PNG library")
        if libpng is not None:
            # Linux / Mac
            min_version = "1.6.0"
            png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE)
            png_version = png_version.stdout.strip().decode("utf-8")
            png_version = parse_version(png_version)
            if png_version >= parse_version(min_version):
                print("Building torchvision with PNG image support")
                png_lib = subprocess.run([libpng, "--libdir"], stdout=subprocess.PIPE)
                png_lib = png_lib.stdout.strip().decode("utf-8")
                if "disabled" not in png_lib:
                    image_library += [png_lib]
                png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE)
                png_include = png_include.stdout.strip().decode("utf-8")
                _, png_include = png_include.split("-I")
                image_include += [png_include]
                image_link_flags.append("png")
                print(f"  libpng version: {png_version}")
                print(f"  libpng include path: {png_include}")
            else:
                print("Could not add PNG image support to torchvision:")
                print(f"  libpng minimum version {min_version}, found {png_version}")
                use_png = False
        else:
            # Windows
            png_lib = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "lib")
            png_include = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "include", "libpng16")
            image_library += [png_lib]
            image_include += [png_include]
            image_link_flags.append("libpng")
    else:
        print("Building torchvision without PNG image support")
    image_macros += [("PNG_FOUND", str(int(use_png)))]

    # Locating libjpeg
    (jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include)

    use_jpeg = use_jpeg and jpeg_found
    if use_jpeg:
        print("Building torchvision with JPEG image support")
        print(f"  libjpeg include path: {jpeg_include}")
        print(f"  libjpeg lib path: {jpeg_lib}")
        image_link_flags.append("jpeg")
        if jpeg_conda:
            image_library += [jpeg_lib]
            image_include += [jpeg_include]
    else:
        print("Building torchvision without JPEG image support")
    image_macros += [("JPEG_FOUND", str(int(use_jpeg)))]

    # Locating nvjpeg
    # Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI
    nvjpeg_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h"))
    )

    use_nvjpeg = use_nvjpeg and nvjpeg_found
    if use_nvjpeg:
        print("Building torchvision with NVJPEG image support")
        image_link_flags.append("nvjpeg")
    else:
        print("Building torchvision without NVJPEG image support")
    image_macros += [("NVJPEG_FOUND", str(int(use_nvjpeg)))]

    image_path = os.path.join(extensions_dir, "io", "image")
    image_src = glob.glob(os.path.join(image_path, "*.cpp")) + glob.glob(os.path.join(image_path, "cpu", "*.cpp"))

    if is_rocm_pytorch:
        image_src += glob.glob(os.path.join(image_path, "hip", "*.cpp"))
        # we need to exclude this in favor of the hipified source
        image_src.remove(os.path.join(image_path, "image.cpp"))
    else:
        image_src += glob.glob(os.path.join(image_path, "cuda", "*.cpp"))

    if use_png or use_jpeg:
        ext_modules.append(
            extension(
                "torchvision.image",
                image_src,
                include_dirs=image_include + include_dirs + [image_path],
                library_dirs=image_library + library_dirs,
                define_macros=image_macros,
                libraries=image_link_flags,
                extra_compile_args=extra_compile_args,
            )
        )

    # Locating ffmpeg
    ffmpeg_exe = shutil.which("ffmpeg")
    has_ffmpeg = ffmpeg_exe is not None
    ffmpeg_version = None
    # FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9
    # FIXME: causes crash. See the following GitHub issues for more details.
    # FIXME: https://github.com/pytorch/pytorch/issues/65000
    # FIXME: https://github.com/pytorch/vision/issues/3367
    if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
        has_ffmpeg = False
    if has_ffmpeg:
        try:
            # This is to check if ffmpeg is installed properly.
            ffmpeg_version = subprocess.check_output(["ffmpeg", "-version"])
        except subprocess.CalledProcessError:
            print("Building torchvision without ffmpeg support")
            print("  Error fetching ffmpeg version, ignoring ffmpeg.")
            has_ffmpeg = False

    use_ffmpeg = use_ffmpeg and has_ffmpeg
    if use_ffmpeg:
        ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}

        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
        ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")

        gcc = os.environ.get("CC", shutil.which("gcc"))
        platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode("utf-8")
        if platform_tag:
            # Most probably a Debian-based distribution
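            # `gcc -print-multiarch` prints a triplet such as x86_64-linux-gnu; Debian-style
            # multiarch installs nest the ffmpeg headers and libraries under that subdirectory.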
            ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
            ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]

        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + include_dirs:
                full_path = os.path.join(search_path, library, "*.h")
                library_found |= len(glob.glob(full_path)) > 0

            if not library_found:
                print("Building torchvision without ffmpeg support")
                print(f"  {library} header files were not found, disabling ffmpeg support")
                use_ffmpeg = False
    else:
        print("Building torchvision without ffmpeg support")
    if use_ffmpeg:
        print("Building torchvision with ffmpeg support")
        print(f"  ffmpeg version: {ffmpeg_version}")
        print(f"  ffmpeg include path: {ffmpeg_include_dir}")
        print(f"  ffmpeg library_dir: {ffmpeg_library_dir}")

        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader")
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "decoder")
        base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video")
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]

        combined_src = video_reader_src + base_decoder_src + videoapi_src

        ext_modules.append(
            CppExtension(
                "torchvision.video_reader",
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    extensions_dir,
                    *ffmpeg_include_dir,
                    *include_dirs,
                ],
                library_dirs=ffmpeg_library_dir + library_dirs,
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                ],
                extra_compile_args=["-std=c++17"] if os.name != "nt" else ["/std:c++17", "/MP"],
                extra_link_args=["-std=c++17" if os.name != "nt" else "/std:c++17"],
            )
        )

    # Locating video codec
    # CUDA_HOME should be set to the cuda root directory.
    # TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the location to
    # video codec header files and libraries respectively.
    video_codec_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in library_dirs])
    )

    use_video_codec = use_video_codec and video_codec_found
    if (
        use_video_codec
        and use_ffmpeg
        and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
    ):
        print("Building torchvision with video codec support")
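        # GPU decoder extension (torchvision.Decoder): links against ffmpeg, nvcuvid and the CUDA runtime.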
        gpu_decoder_path = os.path.join(extensions_dir, "io", "decoder", "gpu")
        gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
        cuda_libs = os.path.join(CUDA_HOME, "lib64")
        cuda_inc = os.path.join(CUDA_HOME, "include")

        ext_modules.append(
            extension(
                "torchvision.Decoder",
                gpu_decoder_src,
                include_dirs=include_dirs + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
                library_dirs=ffmpeg_library_dir + library_dirs + [cuda_libs],
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                    "nvcuvid",
                    "cuda",
                    "cudart",
                    "z",
                    "pthread",
                    "dl",
                    "nppicc",
                ],
                extra_compile_args=extra_compile_args,
            )
        )
    else:
        print("Building torchvision without video codec support")
        if (
            use_video_codec
            and use_ffmpeg
            and not any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
        ):
            print(
                "  The installed version of ffmpeg is missing the header file 'bsf.h' which is "
                "  required for GPU video decoding. Please install the latest ffmpeg from conda-forge channel:"
                "   `conda install -c conda-forge ffmpeg`."
            )
    return ext_modules


class clean(distutils.command.clean.clean):
    def run(self):
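        # Remove every path matched by the patterns in .gitignore, then run the standard clean.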
        with open(".gitignore") as f:
            ignores = f.read()
            for wildcard in filter(None, ignores.split("\n")):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


if __name__ == "__main__":
    print(f"Building wheel {package_name}-{version}")

    write_version_file()

    with open("README.md") as f:
        readme = f.read()

    setup(
        # Metadata
        name=package_name,
        version=version,
        author="PyTorch Core Team",
        author_email="soumith@pytorch.org",
        url="https://github.com/pytorch/vision",
        description="image and video datasets and models for torch deep learning",
        long_description=readme,
        long_description_content_type="text/markdown",
        license="BSD",
        # Package info
        packages=find_packages(exclude=("test",)),
        package_data={package_name: ["*.dll", "*.dylib", "*.so", "prototype/datasets/_builtin/*.categories"]},
        zip_safe=False,
        install_requires=requirements,
        extras_require={
            "scipy": ["scipy"],
        },
        ext_modules=get_extensions(),
        python_requires=">=3.8",
        cmdclass={
            "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
            "clean": clean,
        },
    )