Commit f25c0e74 authored by rusty1s

added sub

parent 8b8df250
@@ -7,7 +7,7 @@ import torch_scatter
from .utils import devices
-funcs = ['add']
+funcs = ['add', 'sub']
indices = [2, 0, 1, 1, 0]
...
@@ -10,8 +10,30 @@ tests = [{
'name': 'add',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 0,
'expected': [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]]
}, {
'name': 'add',
'src': [[5, 2], [2, 5], [4, 3], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'dim': 0,
'fill_value': 0,
'expected': [[6, 5], [6, 8]]
}, {
'name': 'sub',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 9,
'expected': [[9, 9, 5, 6, 6, 9], [7, 5, 5, 9, 9, 9]]
}, {
'name': 'sub',
'src': [[5, 2], [2, 2], [4, 2], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'dim': 0,
'fill_value': 9,
'expected': [[3, 4], [3, 5]]
}]
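The new 'sub' expectations can be sanity-checked with plain PyTorch, since a scatter-subtract is just a scatter-add of the negated source into an output pre-filled with the fill value; a minimal sketch for the first 'sub' case above (dim=-1, fill_value=9):

import torch

# First 'sub' test case: subtract src into a (2, 6) output pre-filled
# with 9, grouping elements along the last dimension by index.
src = torch.tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
index = torch.tensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
out = torch.full((2, 6), 9, dtype=src.dtype)
out.scatter_add_(-1, index, src.neg())
print(out.tolist())  # [[9, 9, 5, 6, 6, 9], [7, 5, 5, 9, 9, 9]]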
@@ -21,29 +43,6 @@ def test_forward(test, dtype, device):
index = tensor(test['index'], torch.long, device)
op = getattr(torch_scatter, 'scatter_{}'.format(test['name']))
-output = op(src, index, fill_value=test['fill_value'])
+output = op(src, index, test['dim'], fill_value=test['fill_value'])
assert output.tolist() == test['expected']
# name = data[i]['name']
# index = torch.LongTensor(data[i]['index'])
# input = Tensor(tensor, data[i]['input'])
# dim = data[i]['dim']
# fill_value = data[i]['fill_value']
# expected = torch.FloatTensor(data[i]['expected']).type_as(input)
# output = expected.new(expected.size()).fill_(fill_value)
# func = getattr(torch_scatter, 'scatter_{}_'.format(name))
# result = func(output, index, input, dim)
# assert output.tolist() == expected.tolist()
# if 'expected_arg' in data[i]:
# expected_arg = torch.LongTensor(data[i]['expected_arg'])
# assert result[1].tolist() == expected_arg.tolist()
# func = getattr(torch_scatter, 'scatter_{}'.format(name))
# result = func(index, input, dim, fill_value=fill_value)
# if 'expected_arg' not in data[i]:
# assert result.tolist() == expected.tolist()
# else:
# expected_arg = torch.LongTensor(data[i]['expected_arg'])
# assert result[0].tolist() == expected.tolist()
# assert result[1].tolist() == expected_arg.tolist()
-from .add import ScatterAdd, scatter_add
+from .add import scatter_add
+from .sub import scatter_sub
__version__ = '1.0.0'
-__all__ = ['ScatterAdd', 'scatter_add', '__version__']
+__all__ = ['scatter_add', 'scatter_sub', '__version__']
@@ -73,7 +73,7 @@ def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
.. testcode::
-from torch_scatter import scatter_add_
+from torch_scatter import scatter_add
src = torch.tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
index = torch.tensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
out = src.new_zeros((2, 6))
...
from .utils import gen_output
def scatter_sub_(output, index, input, dim=0):
r"""
|
.. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
master/docs/source/_figures/sub.svg?sanitize=true
:align: center
:width: 400px
|
Subtracts all values from the :attr:`input` tensor into :attr:`output` at
the indices specified in the :attr:`index` tensor along a given axis
:attr:`dim`. If multiple indices reference the same location, their
**negated contributions add** (`cf.` :meth:`~torch_scatter.scatter_add_`).
For one-dimensional tensors, the operation computes
.. math::
\mathrm{output}_i = \mathrm{output}_i - \sum_j \mathrm{input}_j
where the sum is over :math:`j` such that :math:`\mathrm{index}_j = i`.
Args:
output (Tensor): The destination tensor
index (LongTensor): The indices of elements to scatter
input (Tensor): The source tensor
dim (int, optional): The axis along which to index
:rtype: :class:`Tensor`
.. testsetup::
import torch
.. testcode::
from torch_scatter import scatter_sub_
input = torch.Tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
output = torch.zeros(2, 6)
scatter_sub_(output, index, input, dim=1)
print(output)
.. testoutput::
0 0 -4 -3 -3 0
-2 -4 -4 0 0 0
[torch.FloatTensor of size 2x6]
"""
return output.scatter_add_(dim, index, -1 * input)
def scatter_sub(index, input, dim=0, size=None, fill_value=0):
r"""Subtracts all values from the :attr:`input` tensor at the indices
specified in the :attr:`index` tensor along a given axis :attr:`dim`
(`cf.` :meth:`~torch_scatter.scatter_sub_` and
:meth:`~torch_scatter.scatter_add`).
For one-dimensional tensors, the operation computes
.. math::
\mathrm{output}_i = \mathrm{fill\_value} - \sum_j \mathrm{input}_j
where the sum is over :math:`j` such that :math:`\mathrm{index}_j = i`.
Args:
index (LongTensor): The indices of elements to scatter
input (Tensor): The source tensor
dim (int, optional): The axis along which to index
size (int, optional): Output size at dimension :attr:`dim`
fill_value (int, optional): Initial filling of output tensor
:rtype: :class:`Tensor`
.. testsetup::
import torch
.. testcode::
from torch_scatter import scatter_sub
input = torch.Tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
output = scatter_sub(index, input, dim=1)
print(output)
.. testoutput::
0 0 -4 -3 -3 0
-2 -4 -4 0 0 0
[torch.FloatTensor of size 2x6]
"""
output = gen_output(index, input, dim, size, fill_value)
return scatter_sub_(output, index, input, dim)
from .add import scatter_add
def scatter_sub(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
r"""
|
.. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
master/docs/source/_figures/sub.svg?sanitize=true
:align: center
:width: 400px
|
Subtracts all values from the :attr:`src` tensor into :attr:`out` at the
indices specified in the :attr:`index` tensor along a given axis
:attr:`dim`. If multiple indices reference the same location, their
**negated contributions add** (`cf.` :meth:`~torch_scatter.scatter_add`).
For one-dimensional tensors, the operation computes
.. math::
\mathrm{out}_i = \mathrm{out}_i - \sum_j \mathrm{src}_j
where the sum is over :math:`j` such that :math:`\mathrm{index}_j = i`.
Args:
src (Tensor): The source tensor.
index (LongTensor): The indices of elements to scatter.
dim (int, optional): The axis along which to index.
(default: :obj:`-1`)
out (Tensor, optional): The destination tensor. (default: :obj:`None`)
dim_size (int, optional): If :attr:`out` is not given, automatically
create output with size :attr:`dim_size` at dimension :attr:`dim`.
If :attr:`dim_size` is not given, a minimal sized output tensor is
returned. (default: :obj:`None`)
fill_value (int, optional): If :attr:`out` is not given, automatically
fill output tensor with :attr:`fill_value`. (default: :obj:`0`)
:rtype: :class:`Tensor`
.. testsetup::
import torch
.. testcode::
from torch_scatter import scatter_sub
src = torch.tensor([[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
index = torch.tensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
out = src.new_zeros((2, 6))
out = scatter_sub(src, index, out=out)
print(out)
.. testoutput::
0 0 -4 -3 -3 0
-2 -4 -4 0 0 0
[torch.FloatTensor of size 2x6]
"""
return scatter_add(src.neg(), index, dim, out, dim_size, fill_value)
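A brief usage sketch of this scatter_sub variant, mirroring the second 'sub' test case above and assuming the import path exposed in __init__.py; scattering along dim=0 with an auto-allocated output filled with 9:

import torch
from torch_scatter import scatter_sub  # import as exposed in __init__.py above

# Group the four rows of src into two output rows along dim=0 and
# subtract them from an output pre-filled with 9.
src = torch.tensor([[5, 2], [2, 2], [4, 2], [1, 3]])
index = torch.tensor([[0, 0], [1, 1], [1, 1], [0, 0]])
out = scatter_sub(src, index, dim=0, fill_value=9)
print(out.tolist())  # [[3, 4], [3, 5]]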