Commit 452097ba authored by rusty1s

multi dim bugfixes

parent c3014a78
@@ -2,7 +2,7 @@ from os import path as osp
from setuptools import setup, find_packages
__version__ = '1.0.1'
__version__ = '1.0.2'
url = 'https://github.com/rusty1s/pytorch_scatter'
install_requires = ['cffi']
......
@@ -14,30 +14,30 @@ indices = [2, 0, 1, 1, 0]
@pytest.mark.parametrize('func,device', product(funcs, devices))
def test_backward(func, device):
index = torch.tensor(indices, dtype=torch.long, device=device)
src = torch.rand(index.size(), dtype=torch.double, device=device)
src = torch.rand((index.size(0), 2), dtype=torch.double, device=device)
src.requires_grad_()
op = getattr(torch_scatter, 'scatter_{}'.format(func))
data = (src, index)
data = (src, index, 0)
assert gradcheck(op, data, eps=1e-6, atol=1e-4) is True
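The backward test now feeds a two-column src and passes the scatter dimension explicitly as the third tuple entry, so gradcheck exercises exactly the multi-dimensional code paths this commit fixes. A minimal sketch of the same pattern outside the parametrized test, taking scatter_mean as a representative op and assuming the package-level signature scatter_mean(src, index, dim=-1, ...):

import torch
from torch.autograd import gradcheck
import torch_scatter

index = torch.tensor([2, 0, 1, 1, 0], dtype=torch.long)
# Double precision and requires_grad are needed for a reliable numerical gradient check.
src = torch.rand((index.size(0), 2), dtype=torch.double, requires_grad=True)

# The third tuple entry is the scatter dimension, mirroring `data = (src, index, 0)` above.
assert gradcheck(torch_scatter.scatter_mean, (src, index, 0), eps=1e-6, atol=1e-4)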
tests = [{
'name': 'max',
'src': [1, 2, 3, 4, 5],
'src': [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
'index': [2, 0, 1, 1, 0],
'dim': 0,
'fill_value': 0,
'grad': [4, 8, 6],
'expected': [6, 0, 0, 8, 4]
'grad': [[4, 4], [8, 8], [6, 6]],
'expected': [[6, 6], [0, 0], [0, 0], [8, 8], [4, 4]],
}, {
'name': 'min',
'src': [1, 2, 3, 4, 5],
'src': [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
'index': [2, 0, 1, 1, 0],
'dim': 0,
'fill_value': 3,
'grad': [4, 8, 6],
'expected': [6, 4, 8, 0, 0]
'grad': [[4, 4], [8, 8], [6, 6]],
'expected': [[6, 6], [4, 4], [8, 8], [0, 0], [0, 0]],
}]
......
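These dictionaries now use a two-column src with a one-dimensional index, so the max/min backward must route each row of the incoming gradient to the argmax/argmin position of its group along dim 0. A short sketch reproducing the 'max' case above, assuming scatter_max returns an (out, argmax) pair and broadcasts the 1-D index over the trailing dimension as these tests imply:

import torch
from torch_scatter import scatter_max

src = torch.tensor([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], requires_grad=True)
index = torch.tensor([2, 0, 1, 1, 0])

out, argmax = scatter_max(src, index, dim=0)          # out: [[5, 5], [4, 4], [1, 1]]
out.backward(torch.tensor([[4., 4.], [8., 8.], [6., 6.]]))
print(src.grad)   # [[6, 6], [0, 0], [0, 0], [8, 8], [4, 4]] -- only the argmax rows receive gradient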
@@ -12,70 +12,70 @@ tests = [{
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 0,
'expected': [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]]
'expected': [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]],
}, {
'name': 'add',
'src': [[5, 2], [2, 5], [4, 3], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 0,
'expected': [[6, 5], [6, 8]]
'expected': [[6, 5], [6, 8]],
}, {
'name': 'sub',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 9,
'expected': [[9, 9, 5, 6, 6, 9], [7, 5, 5, 9, 9, 9]]
'expected': [[9, 9, 5, 6, 6, 9], [7, 5, 5, 9, 9, 9]],
}, {
'name': 'sub',
'src': [[5, 2], [2, 2], [4, 2], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 9,
'expected': [[3, 4], [3, 5]]
'expected': [[3, 4], [3, 5]],
}, {
'name': 'mul',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 1,
'expected': [[1, 1, 4, 3, 2, 0], [0, 4, 3, 1, 1, 1]]
'expected': [[1, 1, 4, 3, 2, 0], [0, 4, 3, 1, 1, 1]],
}, {
'name': 'mul',
'src': [[5, 2], [2, 5], [4, 3], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 1,
'expected': [[5, 6], [8, 15]]
'expected': [[5, 6], [8, 15]],
}, {
'name': 'div',
'src': [[2, 1, 1, 4, 2], [1, 2, 1, 2, 4]],
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 1,
'expected': [[1, 1, 0.25, 0.5, 0.5, 1], [0.5, 0.25, 0.5, 1, 1, 1]]
'expected': [[1, 1, 0.25, 0.5, 0.5, 1], [0.5, 0.25, 0.5, 1, 1, 1]],
}, {
'name': 'div',
'src': [[4, 2], [2, 1], [4, 2], [1, 2]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 1,
'expected': [[0.25, 0.25], [0.125, 0.5]]
'expected': [[0.25, 0.25], [0.125, 0.5]],
}, {
'name': 'mean',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
'index': [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
'dim': -1,
'fill_value': 0,
'expected': [[0, 0, 4, 3, 1.5, 0], [1, 4, 2, 0, 0, 0]]
'expected': [[0, 0, 4, 3, 1.5, 0], [1, 4, 2, 0, 0, 0]],
}, {
'name': 'mean',
'src': [[5, 2], [2, 5], [4, 3], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 0,
'expected': [[3, 2.5], [3, 4]]
'expected': [[3, 2.5], [3, 4]],
}, {
'name': 'max',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
@@ -83,15 +83,15 @@ tests = [{
'dim': -1,
'fill_value': 0,
'expected': [[0, 0, 4, 3, 2, 0], [2, 4, 3, 0, 0, 0]],
'expected_arg': [[-1, -1, 3, 4, 0, 1], [1, 4, 3, -1, -1, -1]]
'expected_arg': [[-1, -1, 3, 4, 0, 1], [1, 4, 3, -1, -1, -1]],
}, {
'name': 'max',
'src': [[5, 2], [2, 5], [4, 3], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 0,
'expected': [[5, 3], [4, 5]],
'expected_arg': [[0, 3], [2, 1]]
'expected_arg': [[0, 3], [2, 1]],
}, {
'name': 'min',
'src': [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
@@ -99,15 +99,15 @@ tests = [{
'dim': -1,
'fill_value': 9,
'expected': [[9, 9, 4, 3, 1, 0], [0, 4, 1, 9, 9, 9]],
'expected_arg': [[-1, -1, 3, 4, 2, 1], [0, 4, 2, -1, -1, -1]]
'expected_arg': [[-1, -1, 3, 4, 2, 1], [0, 4, 2, -1, -1, -1]],
}, {
'name': 'min',
'src': [[5, 2], [2, 5], [4, 3], [1, 3]],
'index': [[0, 0], [1, 1], [1, 1], [0, 0]],
'index': [0, 1, 1, 0],
'dim': 0,
'fill_value': 9,
'expected': [[1, 2], [2, 3]],
'expected_arg': [[3, 0], [1, 2]]
'expected_arg': [[3, 0], [1, 2]],
}]
......
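The recurring change in these forward tests is that index shrinks from an explicitly expanded [[0, 0], [1, 1], [1, 1], [0, 0]] to a plain [0, 1, 1, 0]: after this commit a one-dimensional index is broadcast across the trailing feature dimension. Working through the 'add' entry above by hand, assuming that broadcasting behaviour:

import torch
from torch_scatter import scatter_add

src = torch.tensor([[5, 2], [2, 5], [4, 3], [1, 3]], dtype=torch.float)
index = torch.tensor([0, 1, 1, 0])

# Rows 0 and 3 share index 0, rows 1 and 2 share index 1.
out = scatter_add(src, index, dim=0)
print(out)   # [[6., 5.], [6., 8.]] -- matches the 'expected' value in the test above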
@@ -6,7 +6,7 @@ from .mean import scatter_mean
from .max import scatter_max
from .min import scatter_min
__version__ = '1.0.1'
__version__ = '1.0.2'
__all__ = [
'scatter_add', 'scatter_sub', 'scatter_mul', 'scatter_div', 'scatter_mean',
......
from torch.autograd import Function
from .utils.gen import gen
class ScatterAdd(Function):
@staticmethod
def forward(ctx, out, src, index, dim):
ctx.mark_dirty(out)
ctx.save_for_backward(index)
return out.scatter_add_(dim, index, src)
@staticmethod
def backward(ctx, grad_out):
index, = ctx.saved_variables
grad_src = None
if ctx.needs_input_grad[1]:
grad_src = grad_out[index]
return None, grad_src, None, None
def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
r"""
|
@@ -90,4 +70,4 @@ def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
[ 2, 4, 4, 0, 0, 0]])
"""
src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value)
return ScatterAdd.apply(out, src, index, dim)
return out.scatter_add_(dim, index, src)
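scatter_add is the one op whose custom autograd Function goes away: PyTorch's built-in scatter_add_ already differentiates through src, and its backward gathers grad_out at index along the scatter dimension, which handles multi-dimensional indices correctly where the removed grad_out[index] only covered the 1-D, dim-0 case. A small check of that equivalence in plain PyTorch, using a hand-expanded index (the library's gen helper presumably expands the index to src's shape before scattering):

import torch

src = torch.tensor([[5., 2.], [2., 5.], [4., 3.], [1., 3.]], requires_grad=True)
index = torch.tensor([[0, 0], [1, 1], [1, 1], [0, 0]])   # expanded to src's shape

out = torch.zeros(2, 2).scatter_add(0, index, src)
grad_out = torch.tensor([[1., 2.], [3., 4.]])
out.backward(grad_out)

# Autograd's gradient for scatter_add is exactly grad_out gathered at index.
assert torch.equal(src.grad, grad_out.gather(0, index))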
@@ -12,6 +12,7 @@ class ScatterDiv(Function):
ctx.mark_dirty(out)
ctx.save_for_backward(out, src, index)
ctx.dim = dim
return out
@@ -21,7 +22,7 @@ class ScatterDiv(Function):
grad_src = None
if ctx.needs_input_grad[1]:
grad_src = -(out * grad_out)[index] / src
grad_src = -(out * grad_out).gather(ctx.dim, index) / src
return None, grad_src, None, None
......
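The div backward (and, below, mean and mul) swaps advanced indexing, grad_out[index], for grad_out.gather(ctx.dim, index). The two only coincide for a one-dimensional index along dim 0; with a multi-dimensional index, t[index] selects whole slices and changes the shape, while gather picks one element per position along the chosen dim and keeps the shape the gradient must have. A quick illustration with hypothetical values:

import torch

t = torch.arange(6.).view(2, 3)                 # [[0., 1., 2.], [3., 4., 5.]]
index = torch.tensor([[0, 0, 1], [1, 1, 0]])

print(t[index].shape)              # torch.Size([2, 3, 3]) -- advanced indexing selects whole rows
print(t.gather(1, index))          # [[0., 0., 1.], [4., 4., 3.]] -- elementwise pick along dim 1, shape kept

The sign and the division by src come from the quotient rule: each output entry is the fill value divided by every source element scattered into it, so its derivative with respect to one of those elements is -out/src.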
@@ -15,6 +15,7 @@ class ScatterMean(Function):
ctx.mark_dirty(out)
ctx.save_for_backward(index, count)
ctx.dim = dim
return out
@@ -24,7 +25,8 @@ class ScatterMean(Function):
grad_src = None
if ctx.needs_input_grad[1]:
grad_src = grad_out[index] / count[index]
grad_src = grad_out.gather(ctx.dim, index)
grad_src /= count.gather(ctx.dim, index)
return None, grad_src, None, None
......
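For the mean, both the incoming gradient and the per-group counts now have to be gathered along ctx.dim, so that each source element is divided by the size of its own group. Concretely, with two rows averaged into each output row, every contributor should receive half of that row's gradient; a sketch using the package-level API, again assuming the 1-D index is broadcast over the trailing dimension:

import torch
from torch_scatter import scatter_mean

src = torch.tensor([[5., 2.], [2., 5.], [4., 3.], [1., 3.]], requires_grad=True)
index = torch.tensor([0, 1, 1, 0])

out = scatter_mean(src, index, dim=0)    # [[3., 2.5], [3., 4.]], as in the forward 'mean' test above
out.backward(torch.ones_like(out))
print(src.grad)                          # all entries 0.5: each row is one of two contributors to its mean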
@@ -12,6 +12,7 @@ class ScatterMul(Function):
ctx.mark_dirty(out)
ctx.save_for_backward(out, src, index)
ctx.dim = dim
return out
@@ -21,7 +22,7 @@ class ScatterMul(Function):
grad_src = None
if ctx.needs_input_grad[1]:
grad_src = (grad_out * out)[index] / src
grad_src = (grad_out * out).gather(ctx.dim, index) / src
return None, grad_src, None, None
......
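The mul backward gets the same gather fix; the formula itself follows from the product rule: each output entry is the product of the fill value and all source elements scattered into it, so its derivative with respect to one factor is the product of the remaining ones, i.e. out / src_i. A one-group sanity check in plain PyTorch:

import torch

src = torch.tensor([2., 3., 4.], requires_grad=True)
out = src.prod()                 # a single scatter group reduces to a plain product
out.backward()

print(src.grad)                        # [12., 8., 6.]
print(out.detach() / src.detach())     # the same values: grad_i = out / src_i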
@@ -12,7 +12,7 @@ def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
# Generate output tensor if not given.
if out is None:
dim_size = index.max() + 1 if dim_size is None else dim_size
dim_size = index.max().item() + 1 if dim_size is None else dim_size
out_size = list(src.size())
out_size[dim] = dim_size
out = src.new_full(out_size, fill_value)
......
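The remaining fix is in the output-size inference: index.max() returns a zero-dimensional tensor, and the added .item() turns it into a plain Python int before it goes into the size list passed to new_full. A minimal illustration:

import torch

index = torch.tensor([2, 0, 1, 1, 0])
print(index.max())           # tensor(2) -- a zero-dim tensor, not an int
print(index.max().item())    # 2 -- a plain int, safe to use as a dimension size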