"...model/git@developer.sourcefind.cn:wangsen/mineru.git" did not exist on "f4ffdfe8ef9c7242d4c2e256d76f1aeef0ccc823"
Commit 0f1dc7bc authored by rusty1s's avatar rusty1s
Browse files

modular tests

parent 204b7946
......@@ -5,4 +5,4 @@ description-file = README.md
test = pytest
[tool:pytest]
addopts = --capture=no --cov
addopts = --capture=no
[
{
"name": "add",
"output": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
"index": [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
"input": [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
"dim": 1,
"grad": [[10, 20, 30, 40, 50, 60], [15, 25, 35, 45, 55, 65]],
"expected": [[50, 60, 50, 30, 40], [15, 15, 35, 35, 25]]
},
{
"name": "max",
"output": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
"index": [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
"input": [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
"dim": 1,
"grad": [[10, 20, 30, 40, 50, 60], [15, 25, 35, 45, 55, 65]],
"expected": [[50, 60, 0, 30, 40], [0, 15, 0, 35, 25]]
}
]
[
{
"name": "add",
"output": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
"index": [[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]],
"input": [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]],
"dim": 1,
"expected": [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]]
}
]
import pytest
import torch
from torch.autograd import Variable
from torch_scatter import scatter_add_, scatter_add
from .utils import tensor_strs, Tensor
@pytest.mark.parametrize('str', tensor_strs)
def test_scatter_add(str):
    """Check scatter_add forward and backward on a fixed 2x5 example."""
    index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
    input = Tensor(str, [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
    expected_output = [[0, 0, 4, 3, 3, 0], [2, 4, 4, 0, 0, 0]]

    # In-place variant scatters into a pre-allocated zero tensor.
    output = input.new(2, 6).fill_(0)
    scatter_add_(output, index, input, dim=1)
    assert output.tolist() == expected_output

    # Functional variant allocates and returns its own output.
    output = scatter_add(index, input, dim=1)
    assert output.tolist() == expected_output

    # Backward pass: each input position receives the gradient stored at
    # its target index; with grad_output[j] == j the input gradient must
    # therefore equal the index tensor itself.
    output = Variable(output).fill_(0)
    index = Variable(index)
    input = Variable(input, requires_grad=True)
    scatter_add_(output, index, input, dim=1)
    grad_output = Tensor(str, [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]])
    output.backward(grad_output)
    assert index.data.tolist() == input.grad.data.tolist()
from os import path as osp
from itertools import product
import pytest
import json
import torch
from torch.autograd import Variable as V
import torch_scatter
from .utils import tensors, Tensor
# Load the backward-pass fixtures once at import time.  Using `with`
# guarantees the file handle is closed even if json.load raises; the
# original open()/close() pair leaked the handle on a parse error.
with open(osp.join(osp.dirname(__file__), 'backward.json'), 'r') as f:
    data = json.load(f)
@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_backward_cpu(tensor, i):
    """Run scatter backward case ``i`` from backward.json on the CPU."""
    case = data[i]
    output = V(Tensor(tensor, case['output']))
    index = V(torch.LongTensor(case['index']))
    input = V(Tensor(tensor, case['input']), requires_grad=True)
    dim = case['dim']

    # Resolve the in-place scatter op by name, e.g. `scatter_add_`.
    op = getattr(torch_scatter, 'scatter_{}_'.format(case['name']))
    op(output, index, input, dim)

    output.backward(Tensor(tensor, case['grad']))
    expected = Tensor(tensor, case['expected'])
    assert input.grad.data.tolist() == expected.tolist()
@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_backward_gpu(tensor, i):
    """Run scatter backward case ``i`` from backward.json on the GPU."""
    name = data[i]['name']
    output = V(Tensor(tensor, data[i]['output'])).cuda()
    index = V(torch.LongTensor(data[i]['index'])).cuda()
    # Fixes the original `.cuad()` typo, which raised AttributeError.
    # Additionally, move the tensor to the GPU *before* wrapping it in a
    # Variable so the Variable stays a leaf and accumulates `.grad`
    # (calling `.cuda()` on a requires_grad Variable yields a non-leaf
    # copy whose `.grad` stays None) — same pattern as the other CUDA
    # tests in this suite.
    input = V(Tensor(tensor, data[i]['input']).cuda(), requires_grad=True)
    dim = data[i]['dim']
    grad = Tensor(tensor, data[i]['grad']).cuda()
    expected = Tensor(tensor, data[i]['expected'])
    func = getattr(torch_scatter, 'scatter_{}_'.format(name))
    func(output, index, input, dim)
    output.backward(grad)
    # Compare on the CPU; `expected` is intentionally kept on the host.
    assert input.grad.data.cpu().tolist() == expected.tolist()
from os import path as osp
from itertools import product
import pytest
import json
import torch
import torch_scatter
from .utils import tensors, Tensor
# Load the forward-pass fixtures once at import time.  Using `with`
# guarantees the file handle is closed even if json.load raises; the
# original open()/close() pair leaked the handle on a parse error.
with open(osp.join(osp.dirname(__file__), 'forward.json'), 'r') as f:
    data = json.load(f)
@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_forward_cpu(tensor, i):
    """Run scatter forward case ``i`` from forward.json on the CPU."""
    case = data[i]
    index = torch.LongTensor(case['index'])
    input = Tensor(tensor, case['input'])
    dim = case['dim']
    expected = Tensor(tensor, case['expected'])

    # In-place variant: scatter into a caller-provided output tensor.
    output = Tensor(tensor, case['output'])
    inplace_op = getattr(torch_scatter, 'scatter_{}_'.format(case['name']))
    inplace_op(output, index, input, dim)
    assert output.tolist() == expected.tolist()

    # Functional variant: the op allocates and returns the output.
    functional_op = getattr(torch_scatter, 'scatter_{}'.format(case['name']))
    result = functional_op(index, input, dim)
    assert result.tolist() == expected.tolist()
@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_forward_gpu(tensor, i):
    """Run scatter forward case ``i`` from forward.json on the GPU."""
    case = data[i]
    index = torch.LongTensor(case['index']).cuda()
    input = Tensor(tensor, case['input']).cuda()
    dim = case['dim']
    # Expected values stay on the host; device results are moved back via
    # `.cpu()` before comparison.
    expected = Tensor(tensor, case['expected'])

    # In-place variant: scatter into a caller-provided output tensor.
    output = Tensor(tensor, case['output']).cuda()
    inplace_op = getattr(torch_scatter, 'scatter_{}_'.format(case['name']))
    inplace_op(output, index, input, dim)
    assert output.cpu().tolist() == expected.tolist()

    # Functional variant: the op allocates and returns the output.
    functional_op = getattr(torch_scatter, 'scatter_{}'.format(case['name']))
    result = functional_op(index, input, dim)
    assert result.cpu().tolist() == expected.tolist()
import pytest
import torch
from torch.autograd import Variable
from torch_scatter import scatter_max_, scatter_max
from .utils import tensor_strs, Tensor
@pytest.mark.parametrize('str', tensor_strs)
def test_scatter_max(str):
    """Check scatter_max forward, argmax output and backward on the CPU."""
    index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
    input = Tensor(str, [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]])
    expected_output = [[0, 0, 4, 3, 2, 0], [2, 4, 3, 0, 0, 0]]
    # -1 marks output slots that received no input element.
    expected_arg_output = [[-1, -1, 3, 4, 0, 1], [1, 4, 3, -1, -1, -1]]

    # In-place variant: returns (output, argmax); output is written in place.
    output = input.new(2, 6).fill_(0)
    _, arg_output = scatter_max_(output, index, input, dim=1)
    assert output.tolist() == expected_output
    assert arg_output.tolist() == expected_arg_output

    # Functional variant allocates its own output.
    output, arg_output = scatter_max(index, input, dim=1)
    assert output.tolist() == expected_output
    assert arg_output.tolist() == expected_arg_output

    # Backward pass: gradient flows only to the arg-max input positions;
    # all other positions receive zero.
    output = Variable(output).fill_(0)
    index = Variable(index)
    input = Variable(input, requires_grad=True)
    scatter_max_(output, index, input, dim=1)
    grad_output = Tensor(str, [[10, 20, 30, 40, 50, 60],
                               [15, 25, 35, 45, 55, 65]])
    expected_grad_input = [[50, 60, 0, 30, 40], [0, 15, 0, 35, 25]]
    output.backward(grad_output)
    assert input.grad.data.tolist() == expected_grad_input
@pytest.mark.parametrize('str', tensor_strs)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_scatter_cuda_max(str):
    """Check scatter_max forward, argmax output and backward on the GPU."""
    index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]]).cuda()
    input = Tensor(str, [[2, 0, 1, 4, 3], [0, 2, 1, 3, 4]]).cuda()
    expected_output = [[0, 0, 4, 3, 2, 0], [2, 4, 3, 0, 0, 0]]
    # -1 marks output slots that received no input element.
    expected_arg_output = [[-1, -1, 3, 4, 0, 1], [1, 4, 3, -1, -1, -1]]

    # In-place variant: returns (output, argmax); output is written in place.
    output = input.new(2, 6).fill_(0)
    _, arg_output = scatter_max_(output, index, input, dim=1)
    assert output.cpu().tolist() == expected_output
    assert arg_output.cpu().tolist() == expected_arg_output

    # Functional variant allocates its own output.
    output, arg_output = scatter_max(index, input, dim=1)
    assert output.cpu().tolist() == expected_output
    assert arg_output.cpu().tolist() == expected_arg_output

    # Backward pass: gradient flows only to the arg-max input positions.
    output = Variable(output).fill_(0)
    index = Variable(index)
    input = Variable(input, requires_grad=True)
    scatter_max_(output, index, input, dim=1)
    grad_output = Tensor(str, [[10, 20, 30, 40, 50, 60],
                               [15, 25, 35, 45, 55, 65]]).cuda()
    expected_grad_input = [[50, 60, 0, 30, 40], [0, 15, 0, 35, 25]]
    output.backward(grad_output)
    assert input.grad.data.cpu().tolist() == expected_grad_input
import pytest
import torch
from torch.autograd import Variable
from torch_scatter import scatter_mean_, scatter_mean
from .utils import tensor_strs, Tensor
@pytest.mark.parametrize('str', tensor_strs)
def test_scatter_mean(str):
    """Check scatter_mean forward and backward on a fixed 2x5 example."""
    index = torch.LongTensor([[4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
    input = Tensor(str, [[2, 0, 8, 4, 3], [0, 2, 1, 3, 4]])
    expected_output = [[0, 0, 4, 3, 5, 0], [1, 4, 2, 0, 0, 0]]

    # In-place variant scatters into a pre-allocated zero tensor.
    output = input.new(2, 6).fill_(0)
    scatter_mean_(output, index, input, dim=1)
    assert output.tolist() == expected_output

    # Functional variant allocates and returns its own output.
    output = scatter_mean(index, input, dim=1)
    assert output.tolist() == expected_output

    # Backward pass with grad_output[j] == j: this fixture expects the
    # input gradient to equal the index tensor.
    output = Variable(output).fill_(0)
    index = Variable(index)
    input = Variable(input, requires_grad=True)
    scatter_mean_(output, index, input, dim=1)
    grad_output = Tensor(str, [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]])
    output.backward(grad_output)
    assert index.data.tolist() == input.grad.data.tolist()
import torch
from torch._tensor_docs import tensor_classes
# Names derived from torch's tensor class names with the last four
# characters stripped (presumably a fixed suffix — TODO confirm against
# torch._tensor_docs.tensor_classes entries).  The original defined the
# same comprehension twice; keep a single source of truth and alias it.
tensor_strs = [t[:-4] for t in tensor_classes]
tensors = list(tensor_strs)
def Tensor(str, x):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment