#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension

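# Read the list of runtime dependencies from requirements.txt.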
with open('requirements.txt') as f:
    requirements = f.read().splitlines()


long_description = """
This package implements an efficient parallel algorithm for the computation of discounted cumulative sums,
with differentiable bindings to PyTorch. Given an input sequence `x` and a discount factor `gamma`, the
discounted cumulative sum obeys the recurrence `y_i = x_i + gamma * y_{i+1}` (or its mirror for the opposite
direction of summation); the rewards-to-go of Reinforcement Learning (RL) are a well-known instance. This
operation appears frequently in data science domains concerned with time series.

The traditional sequential algorithm computes the output elements one after another in a loop. For an input
of size `N`, it performs `O(N)` operations and needs `O(N)` dependent time steps to complete, as in the
reference sketch below.
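
For reference, the sequential recurrence takes only a few lines of plain PyTorch (an illustrative sketch;
the function below is not part of this package's API):

```python
import torch

def discounted_cumsum_right_reference(x: torch.Tensor, gamma: float) -> torch.Tensor:
    # y[i] = x[i] + gamma * y[i + 1]; O(N) operations over O(N) dependent steps.
    out = [x[..., -1]]
    for i in range(x.size(-1) - 2, -1, -1):
        out.append(x[..., i] + gamma * out[-1])
    return torch.stack(out[::-1], dim=-1)
```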

The proposed parallel algorithm performs a total of `O(N log N)` operations, but completes in only
`O(log N)` parallel steps, a favorable trade-off in many applications involving large inputs.
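
One way to achieve this profile is Hillis–Steele-style recursive doubling, sketched here in plain PyTorch
(an illustration of the idea, not this package's CUDA kernel): each of the `log2(N)` passes touches up to
`N` elements.

```python
import torch

def discounted_cumsum_right_doubling(x: torch.Tensor, gamma: float) -> torch.Tensor:
    # After the pass with step `offset`, y[i] holds the discounted sum over 2 * offset elements.
    y = x.clone()
    n, offset = y.size(-1), 1
    while offset < n:
        # Every element absorbs the partial sum `offset` positions to its right,
        # discounted by gamma ** offset; all positions can update in parallel.
        y[..., : n - offset] = y[..., : n - offset] + (gamma ** offset) * y[..., offset:]
        offset *= 2
    return y
```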

Features of the parallel algorithm:
- Runtime logarithmic in the input size
- Better numerical precision than sequential algorithms

Features of the package:
- CPU: sequential algorithm in C++
- GPU: parallel algorithm in CUDA
- Gradient computation w.r.t. the input
- Both left and right directions of summation supported
- PyTorch bindings
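
A minimal usage sketch (assuming the `discounted_cumsum_right` binding; consult the project page below for
the authoritative API):

```python
import torch
from torch_discounted_cumsum import discounted_cumsum_right

rewards = torch.ones(1, 16, requires_grad=True)   # a batch of reward sequences
returns = discounted_cumsum_right(rewards, 0.99)  # discounted rewards-to-go
returns.sum().backward()                          # gradients flow back into `rewards`
```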

Find more details and the most up-to-date information on the project webpage:
https://www.github.com/toshas/torch-discounted-cumsum
"""


setup(
    name='torch_discounted_cumsum',
    version='1.0.2',
    description='Fast discounted cumulative sums in PyTorch',
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=requirements,
    python_requires='>=3.6',
    packages=find_packages(),
    author='Anton Obukhov',
    license='BSD',
    url='https://www.github.com/toshas/torch-discounted-cumsum',
    ext_modules=[
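        # CPU extension: the sequential algorithm implemented in C++ (see long_description).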
        CppExtension(
            'torch_discounted_cumsum_cpu',
            [
                os.path.join('torch_discounted_cumsum', 'discounted_cumsum_cpu.cpp'),
            ],
        ),
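        # CUDA extension: the parallel algorithm; building it requires the CUDA toolkit (nvcc).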
        CUDAExtension(
            'torch_discounted_cumsum_cuda',
            [
                os.path.join('torch_discounted_cumsum', 'discounted_cumsum_cuda.cpp'),
                os.path.join('torch_discounted_cumsum', 'discounted_cumsum_cuda_kernel.cu'),
            ],
        )
    ],
    cmdclass={
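        # PyTorch's BuildExtension supplies the compiler and ABI flags needed for C++/CUDA builds.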
        'build_ext': BuildExtension
    },
    keywords=[
        'pytorch', 'discounted', 'cumsum', 'cumulative', 'sum', 'scan', 'differentiable',
        'reinforcement', 'learning', 'rewards', 'time', 'series'
    ],
)