OpenDAS / torch-scatter / Commits / 8b8df250

Commit 8b8df250, authored Apr 28, 2018 by rusty1s

    new pytorch 0.4.0 format

Parent: 367b0af0

Showing 8 changed files with 179 additions and 233 deletions (+179 −233).
setup.py                         +1    −1
test/test_backward.py            +12   −39
test/test_forward.py             +43   −61
test/utils.py                    +12   −8
torch_scatter/__init__.py        +3    −14
torch_scatter/add.py             +90   −0
torch_scatter/functions/add.py   +0    −110
torch_scatter/utils/gen.py       +18   −0
setup.py

--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from os import path as osp
 from setuptools import setup, find_packages
 
-__version__ = '0.3.0'
+__version__ = '1.0.0'
 url = 'https://github.com/rusty1s/pytorch_scatter'
 
 install_requires = ['cffi']
test/test_backward.py

--- a/test/test_backward.py
+++ b/test/test_backward.py
-from os import path as osp
 from itertools import product
 
 import pytest
-import json
 import torch
-from torch.autograd import Variable as V
+from torch.autograd import gradcheck
 import torch_scatter
 
-from .utils import tensors, Tensor
+from .utils import devices
 
-f = open(osp.join(osp.dirname(__file__), 'backward.json'), 'r')
-data = json.load(f)
-f.close()
+funcs = ['add']
+indices = [2, 0, 1, 1, 0]
 
 
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_backward_cpu(tensor, i):
-    name = data[i]['name']
-    index = V(torch.LongTensor(data[i]['index']))
-    input = V(Tensor(tensor, data[i]['input']), requires_grad=True)
-    dim = data[i]['dim']
-    fill_value = data[i]['fill_value']
-    grad = Tensor(tensor, data[i]['grad'])
-    output = V(grad.new(grad.size()).fill_(fill_value))
-    expected = Tensor(tensor, data[i]['expected'])
-    func = getattr(torch_scatter, 'scatter_{}_'.format(name))
-    func(output, index, input, dim)
-    output.backward(grad)
-    assert input.grad.data.tolist() == expected.tolist()
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_backward_gpu(tensor, i):  # pragma: no cover
-    name = data[i]['name']
-    index = V(torch.cuda.LongTensor(data[i]['index']))
-    input = V(Tensor(tensor, data[i]['input']).cuda(), requires_grad=True)
-    dim = data[i]['dim']
-    fill_value = data[i]['fill_value']
-    grad = Tensor(tensor, data[i]['grad']).cuda()
-    output = V(grad.new(grad.size()).fill_(fill_value).cuda())
-    expected = Tensor(tensor, data[i]['expected'])
-    func = getattr(torch_scatter, 'scatter_{}_'.format(name))
-    func(output, index, input, dim)
-    output.backward(grad)
-    assert input.grad.data.cpu().tolist() == expected.tolist()
+@pytest.mark.parametrize('func,device', product(funcs, devices))
+def test_backward(func, device):
+    index = torch.tensor(indices, dtype=torch.long, device=device)
+    src = torch.rand(index.size(), dtype=torch.double, device=device)
+    src.requires_grad_()
+
+    op = getattr(torch_scatter, 'scatter_{}'.format(func))
+    data = (src, index)
+    assert gradcheck(op, data, eps=1e-6, atol=1e-4) is True
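The rewritten test leans entirely on torch.autograd.gradcheck, which perturbs each element of a double-precision input and compares the resulting numerical gradient against the one produced by the backward pass. A minimal standalone sketch of that pattern (not part of the commit; it assumes a torch-scatter ≥ 1.0.0 install exposing scatter_add):

import torch
from torch.autograd import gradcheck
from torch_scatter import scatter_add

# Double precision keeps the finite-difference error below gradcheck's
# tolerance; index stays a plain long tensor since it is not differentiable.
index = torch.tensor([2, 0, 1, 1, 0], dtype=torch.long)
src = torch.rand(index.size(), dtype=torch.double, requires_grad=True)

assert gradcheck(scatter_add, (src, index), eps=1e-6, atol=1e-4)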
test/test_forward.py

--- a/test/test_forward.py
+++ b/test/test_forward.py
-from os import path as osp
 from itertools import product
 
 import pytest
-import json
 import torch
 import torch_scatter
 
-from .utils import tensors, Tensor
+from .utils import dtypes, devices, tensor
 
-f = open(osp.join(osp.dirname(__file__), 'forward.json'), 'r')
-data = json.load(f)
-f.close()
+tests = [{
+    'name': 'add',
+    'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
+    'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
+    'fill_value': 0,
+    'expected': [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]],
+}]
 
 
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_forward_cpu(tensor, i):
-    name = data[i]['name']
-    index = torch.LongTensor(data[i]['index'])
-    input = Tensor(tensor, data[i]['input'])
-    dim = data[i]['dim']
-    fill_value = data[i]['fill_value']
-    expected = torch.FloatTensor(data[i]['expected']).type_as(input)
-    output = expected.new(expected.size()).fill_(fill_value)
-    func = getattr(torch_scatter, 'scatter_{}_'.format(name))
-    result = func(output, index, input, dim)
-    assert output.tolist() == expected.tolist()
-    if 'expected_arg' in data[i]:
-        expected_arg = torch.LongTensor(data[i]['expected_arg'])
-        assert result[1].tolist() == expected_arg.tolist()
-
-    func = getattr(torch_scatter, 'scatter_{}'.format(name))
-    result = func(index, input, dim, fill_value=fill_value)
-    if 'expected_arg' not in data[i]:
-        assert result.tolist() == expected.tolist()
-    else:
-        expected_arg = torch.LongTensor(data[i]['expected_arg'])
-        assert result[0].tolist() == expected.tolist()
-        assert result[1].tolist() == expected_arg.tolist()
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_forward_gpu(tensor, i):  # pragma: no cover
-    name = data[i]['name']
-    index = torch.cuda.LongTensor(data[i]['index'])
-    input = Tensor(tensor, data[i]['input']).cuda()
-    dim = data[i]['dim']
-    fill_value = data[i]['fill_value']
-    expected = torch.FloatTensor(data[i]['expected']).type_as(input)
-    output = expected.new(expected.size()).fill_(fill_value).cuda()
-    func = getattr(torch_scatter, 'scatter_{}_'.format(name))
-    result = func(output, index, input, dim)
-    assert output.cpu().tolist() == expected.tolist()
-    if 'expected_arg' in data[i]:
-        expected_arg = torch.LongTensor(data[i]['expected_arg'])
-        assert result[1].cpu().tolist() == expected_arg.tolist()
-
-    func = getattr(torch_scatter, 'scatter_{}'.format(name))
-    result = func(index, input, dim, fill_value=fill_value)
-    if 'expected_arg' not in data[i]:
-        assert result.cpu().tolist() == expected.tolist()
-    else:
-        expected_arg = torch.LongTensor(data[i]['expected_arg'])
-        assert result[0].cpu().tolist() == expected.tolist()
-        assert result[1].cpu().tolist() == expected_arg.tolist()
+@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
+def test_forward(test, dtype, device):
+    src = tensor(test['src'], dtype, device)
+    index = tensor(test['index'], torch.long, device)
+
+    op = getattr(torch_scatter, 'scatter_{}'.format(test['name']))
+    output = op(src, index, fill_value=test['fill_value'])
+    assert output.tolist() == test['expected']
+
+    # name = data[i]['name']
+    # index = torch.LongTensor(data[i]['index'])
+    # input = Tensor(tensor, data[i]['input'])
+    # dim = data[i]['dim']
+    # fill_value = data[i]['fill_value']
+    # expected = torch.FloatTensor(data[i]['expected']).type_as(input)
+    # output = expected.new(expected.size()).fill_(fill_value)
+    # func = getattr(torch_scatter, 'scatter_{}_'.format(name))
+    # result = func(output, index, input, dim)
+    # assert output.tolist() == expected.tolist()
+    # if 'expected_arg' in data[i]:
+    #     expected_arg = torch.LongTensor(data[i]['expected_arg'])
+    #     assert result[1].tolist() == expected_arg.tolist()
+    # func = getattr(torch_scatter, 'scatter_{}'.format(name))
+    # result = func(index, input, dim, fill_value=fill_value)
+    # if 'expected_arg' not in data[i]:
+    #     assert result.tolist() == expected.tolist()
+    # else:
+    #     expected_arg = torch.LongTensor(data[i]['expected_arg'])
+    #     assert result[0].tolist() == expected.tolist()
+    #     assert result[1].tolist() == expected_arg.tolist()
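The fixture's 'expected' values can be reproduced with the built-in Tensor.scatter_add_ alone, which is the primitive the library wraps. A quick sanity check (not part of the commit):

import torch

src = torch.tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
index = torch.tensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])

# Row 0: column 4 collects 2 + 1 = 3, column 2 gets 4, column 3 gets 3,
# column 5 gets 0; untouched columns keep the fill value 0.
out = torch.zeros(2, 6, dtype=src.dtype).scatter_add_(1, index, src)
assert out.tolist() == [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]]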
test/utils.py

--- a/test/utils.py
+++ b/test/utils.py
 import torch
-from torch._tensor_docs import tensor_classes
+from torch.testing import get_all_dtypes
 
-tensors = [t[:-4] for t in tensor_classes]
-tensors.remove('ShortTensor')  # TODO: PyTorch `atomicAdd` bug with short type.
-tensors.remove('ByteTensor')  # We cannot properly test unsigned values.
-tensors.remove('CharTensor')  # Overflow on gradient computations :(
+dtypes = get_all_dtypes()
+dtypes.remove(torch.half)
+dtypes.remove(torch.short)  # TODO: PyTorch `atomicAdd` bug with short type.
+dtypes.remove(torch.uint8)  # We cannot properly test unsigned values.
+dtypes.remove(torch.int8)  # Overflow on gradient computations :(
 
+devices = [torch.device('cpu')]
+if torch.cuda.is_available():  # pragma: no cover
+    devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))]
 
-def Tensor(str, x):
-    tensor = getattr(torch, str)
-    return tensor(x)
+
+def tensor(x, dtype, device):
+    return None if x is None else torch.tensor(x, dtype=dtype, device=device)
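The move from tensor-class names to (dtype, device) pairs is what lets one test body cover CPU and GPU. A sketch of how the new grid composes, with a reduced dtype list for illustration (not from the commit; names mirror the module above):

from itertools import product

import torch

dtypes = [torch.float, torch.double]   # reduced list for illustration
devices = [torch.device('cpu')]


def tensor(x, dtype, device):
    # Same shape as the helper above: build a tensor for the grid point,
    # passing None through untouched.
    return None if x is None else torch.tensor(x, dtype=dtype, device=device)


for dtype, device in product(dtypes, devices):
    src = tensor([[2, 0, 1]], dtype, device)
    assert src.dtype == dtype and src.device.type == device.type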
torch_scatter/__init__.py

--- a/torch_scatter/__init__.py
+++ b/torch_scatter/__init__.py
-from .functions.add import scatter_add_, scatter_add
-from .functions.sub import scatter_sub_, scatter_sub
-from .functions.mul import scatter_mul_, scatter_mul
-from .functions.div import scatter_div_, scatter_div
-from .functions.mean import scatter_mean_, scatter_mean
-from .functions.max import scatter_max_, scatter_max
-from .functions.min import scatter_min_, scatter_min
+from .add import ScatterAdd, scatter_add
 
-__version__ = '0.3.0'
+__version__ = '1.0.0'
 
-__all__ = [
-    'scatter_add_', 'scatter_add', 'scatter_sub_', 'scatter_sub',
-    'scatter_mul_', 'scatter_mul', 'scatter_div_', 'scatter_div',
-    'scatter_mean_', 'scatter_mean', 'scatter_max_', 'scatter_max',
-    'scatter_min_', 'scatter_min', '__version__'
-]
+__all__ = ['ScatterAdd', 'scatter_add', '__version__']
torch_scatter/add.py  (new file, 0 → 100644)

from torch.autograd import Function

from .utils.gen import gen


class ScatterAdd(Function):
    @staticmethod
    def forward(ctx, out, src, index, dim=-1):
        ctx.mark_dirty(out)
        ctx.save_for_backward(index)
        return out.scatter_add_(dim, index, src)

    @staticmethod
    def backward(ctx, grad_out):
        index, = ctx.saved_variables

        grad_src = None
        if ctx.needs_input_grad[1]:
            grad_src = grad_out[index]

        return None, grad_src, None, None


def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
    r"""
    |

    .. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
       master/docs/source/_figures/add.svg?sanitize=true
       :align: center
       :width: 400px

    |

    Sums all values from the :attr:`src` tensor into :attr:`out` at the indices
    specified in the :attr:`index` tensor along a given axis :attr:`dim`. For
    each value in :attr:`src`, its output index is specified by its index in
    :attr:`src` for dimensions outside of :attr:`dim` and by the
    corresponding value in :attr:`index` for dimension :attr:`dim`. If
    multiple indices reference the same location, their **contributions add**.

    Formally, if :attr:`src` and :attr:`index` are n-dimensional tensors with
    size :math:`(x_0, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})` and
    :attr:`dim` = `i`, then :attr:`out` must be an n-dimensional tensor with
    size :math:`(x_0, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})`. Moreover, the
    values of :attr:`index` must be between `0` and `out.size(dim) - 1`.

    For one-dimensional tensors, the operation computes

    .. math::
        \mathrm{out}_i = \mathrm{out}_i + \sum_j \mathrm{src}_j

    where the sum is over :math:`j` such that :math:`\mathrm{index}_j = i`.

    Args:
        src (Tensor): The source tensor.
        index (LongTensor): The indices of elements to scatter.
        dim (int, optional): The axis along which to index.
            (default: :obj:`-1`)
        out (Tensor, optional): The destination tensor. (default: :obj:`None`)
        dim_size (int, optional): If :attr:`out` is not given, automatically
            create output with size :attr:`dim_size` at dimension :attr:`dim`.
            If :attr:`dim_size` is not given, a minimal sized output tensor is
            returned. (default: :obj:`None`)
        fill_value (int, optional): If :attr:`out` is not given, automatically
            fill output tensor with :attr:`fill_value`. (default: :obj:`0`)

    :rtype: :class:`Tensor`

    .. testsetup::

        import torch

    .. testcode::

        from torch_scatter import scatter_add

        src = torch.tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
        index = torch.tensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
        out = src.new_zeros((2, 6))
        out = scatter_add(src, index, out=out)
        print(out)

    .. testoutput::

        0 0 4 3 3 0
        2 4 4 0 0 0
        [torch.FloatTensor of size 2x6]
    """
    out, index = gen(src, index, dim, out, dim_size, fill_value)
    return ScatterAdd.apply(out, src, index, dim)
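The backward pass above returns grad_out[index] for src: each src[j] is added exactly once into out[index[j]], so the gradient with respect to src[j] is simply the output gradient at that slot. A one-dimensional sketch of why that gather is correct (not from the commit):

import torch

index = torch.tensor([2, 0, 1, 1, 0])
grad_out = torch.tensor([10.0, 20.0, 30.0])

# Advanced indexing gathers grad_out along dim 0, one entry per src element,
# exactly reversing the routing that scatter_add_ performed in forward.
grad_src = grad_out[index]
assert grad_src.tolist() == [30.0, 10.0, 20.0, 20.0, 10.0]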
torch_scatter/functions/add.py  (deleted, 100644 → 0)

from .utils import gen_output


def scatter_add_(output, index, input, dim=0):
    r"""
    |

    .. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
       master/docs/source/_figures/add.svg?sanitize=true
       :align: center
       :width: 400px

    |

    Sums all values from the :attr:`input` tensor into :attr:`output` at the
    indices specified in the :attr:`index` tensor along a given axis
    :attr:`dim`. For each value in :attr:`input`, its output index is specified
    by its index in :attr:`input` for dimensions outside of :attr:`dim` and by
    the corresponding value in :attr:`index` for dimension :attr:`dim`. If
    multiple indices reference the same location, their **contributions add**.

    If :attr:`input` and :attr:`index` are n-dimensional tensors with size
    :math:`(x_0, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})` and
    :attr:`dim` = `i`, then :attr:`output` must be an n-dimensional tensor with
    size :math:`(x_0, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})`. Moreover, the
    values of :attr:`index` must be between `0` and `output.size(dim) - 1`.

    For one-dimensional tensors, the operation computes

    .. math::
        \mathrm{output}_i = \mathrm{output}_i + \sum_j \mathrm{input}_j

    where the sum is over :math:`j` such that :math:`\mathrm{index}_j = i`.

    Args:
        output (Tensor): The destination tensor
        index (LongTensor): The indices of elements to scatter
        input (Tensor): The source tensor
        dim (int, optional): The axis along which to index

    :rtype: :class:`Tensor`

    .. testsetup::

        import torch

    .. testcode::

        from torch_scatter import scatter_add_

        input = torch.Tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
        index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
        output = torch.zeros(2, 6)
        scatter_add_(output, index, input, dim=1)
        print(output)

    .. testoutput::

        0 0 4 3 3 0
        2 4 4 0 0 0
        [torch.FloatTensor of size 2x6]
    """
    return output.scatter_add_(dim, index, input)


def scatter_add(index, input, dim=0, size=None, fill_value=0):
    r"""Sums all values from the :attr:`input` tensor at the indices specified
    in the :attr:`index` tensor along a given axis :attr:`dim` (`cf.`
    :meth:`~torch_scatter.scatter_add_`).

    The output size at dimension :attr:`dim` is given by :attr:`size` and must
    be at least `index.max(dim) + 1`. If :attr:`size` is not given, a
    minimal sized output tensor is returned. The output tensor is prefilled
    with the specified value from :attr:`fill_value`.

    For one-dimensional tensors, the operation computes

    .. math::
        \mathrm{output}_i = \mathrm{fill\_value} + \sum_j \mathrm{input}_j

    where the sum is over :math:`j` such that :math:`\mathrm{index}_j = i`.

    Args:
        index (LongTensor): The indices of elements to scatter
        input (Tensor): The source tensor
        dim (int, optional): The axis along which to index
        size (int, optional): Output size at dimension :attr:`dim`
        fill_value (int, optional): Initial filling of output tensor

    :rtype: :class:`Tensor`

    .. testsetup::

        import torch

    .. testcode::

        from torch_scatter import scatter_add

        input = torch.Tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
        index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
        output = scatter_add(index, input, dim=1)
        print(output)

    .. testoutput::

        0 0 4 3 3 0
        2 4 4 0 0 0
        [torch.FloatTensor of size 2x6]
    """
    output = gen_output(index, input, dim, size, fill_value)
    return scatter_add_(output, index, input, dim)
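Note the call convention flips with this commit: the deleted module took (index, input, ...) with dim defaulting to 0, while the new scatter_add in torch_scatter/add.py takes (src, index, ...) with dim defaulting to -1 and out/dim_size replacing size. A hedged usage sketch against the new signature (assumes the 1.0.0 API above is installed):

import torch
from torch_scatter import scatter_add

src = torch.tensor([[2.0, 0.0, 1.0, 4.0, 3.0], [0.0, 2.0, 1.0, 3.0, 4.0]])
index = torch.tensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])

# 0.3.0 style was scatter_add(index, input, dim=1); 1.0.0 puts src first
# and indexes the last dimension by default, so no dim argument is needed.
out = scatter_add(src, index)
assert out.tolist() == [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]]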
torch_scatter/utils/gen.py  (new file, 0 → 100644)

from itertools import repeat


def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
    # Automatically expand index tensor to the right dimensions.
    if index.dim() == 1:
        index_size = [*repeat(1, src.dim())]
        index_size[dim] = src.size(dim)
        index = index.view(index_size).expand_as(src)

    # Generate output tensor if not given.
    if out is None:
        dim_size = index.max() + 1 if dim_size is None else dim_size
        out_size = [*src.size()]
        out_size[dim] = dim_size
        out = src.new_full(out_size, fill_value)

    return out, index
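The first branch of gen is plain index broadcasting: a 1-d index is reshaped to all-ones except at dim, then expanded to src's full shape. A standalone sketch of what the view/expand_as pair produces (not part of the commit):

from itertools import repeat

import torch

src = torch.rand(3, 5)
index = torch.tensor([0, 1, 0, 2, 1])  # one bucket per column

dim = -1
index_size = [*repeat(1, src.dim())]   # -> [1, 1]
index_size[dim] = src.size(dim)        # -> [1, 5]
index = index.view(index_size).expand_as(src)  # broadcast to shape (3, 5)
assert index.size() == src.size()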