Commit 8f8fbb9f authored by Hang Zhang

v1.0.1

parent aa9af7fd
@@ -13,6 +13,10 @@ import torch
 import torch.nn.functional as F
 from torch.autograd import Variable, gradcheck
+import torchvision.models as models
+
+EPS = 1e-6
+
 
 def test_aggregateP():
     B,N,K,D = 2,3,4,5
     A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5),
@@ -20,7 +24,7 @@ def test_aggregateP():
     R = Variable(torch.cuda.DoubleTensor(B,N,K,D).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (A, R)
-    test = gradcheck(encoding.aggregateP(), input, eps=1e-6, atol=1e-4)
+    test = gradcheck(encoding.functions.aggregateP, input, eps=1e-6, atol=1e-4)
     print('Testing aggregate(): {}'.format(test))
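Note: every test in this file follows the same recipe: build double-precision CUDA inputs, then let `gradcheck` compare analytic gradients against finite differences. A minimal standalone sketch of that recipe (pure PyTorch, no `encoding` dependency; `F.linear` is just a stand-in for the op under test):

```python
import torch
import torch.nn.functional as F
from torch.autograd import Variable, gradcheck

# Double precision keeps the finite-difference error below atol;
# F.linear stands in for the custom op being checked.
x = Variable(torch.DoubleTensor(2, 3).uniform_(-0.5, 0.5), requires_grad=True)
w = Variable(torch.DoubleTensor(4, 3).uniform_(-0.5, 0.5), requires_grad=True)
ok = gradcheck(F.linear, (x, w), eps=1e-6, atol=1e-4)
print('gradcheck passed:', ok)
```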
@@ -33,7 +37,7 @@ def test_aggregate():
     C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (A, X, C)
-    test = gradcheck(encoding.aggregate(), input, eps=1e-6, atol=1e-4)
+    test = gradcheck(encoding.functions.aggregate, input, eps=1e-6, atol=1e-4)
     print('Testing aggregate(): {}'.format(test))
@@ -46,7 +50,7 @@ def test_scaledL2():
     S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (X, C, S)
-    test = gradcheck(encoding.scaledL2(), input, eps=1e-6, atol=1e-4)
+    test = gradcheck(encoding.functions.scaledL2, input, eps=1e-6, atol=1e-4)
     print('Testing scaledL2(): {}'.format(test))
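For reference, a pure-PyTorch sketch of what `scaledL2` appears to compute, inferred from the shapes in this test (X: B x N x D, C: K x D, S: K, output: B x N x K). This is my reading of the semantics, not the package's actual CUDA kernel:

```python
import torch

def scaled_l2_ref(X, C, S):
    # residuals R[b,i,k] = X[b,i] - C[k], shape (B, N, K, D)
    R = X.unsqueeze(2) - C.view(1, 1, *C.shape)
    # per-codeword scaled squared distances, shape (B, N, K)
    return S.view(1, 1, -1) * R.pow(2).sum(-1)
```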
@@ -59,15 +63,14 @@ def test_assign():
     S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
         requires_grad=True)
-    R = encoding.residual()(X, C)
-    A1 = encoding.assign(R, S)
-    E1 = encoding.aggregateP()(A1, R)
+    R = encoding.functions.residual(X, C)
+    A1 = encoding.functions.assign(R, S)
+    E1 = encoding.functions.aggregateP(A1, R)
 
-    A2 = F.softmax(encoding.scaledL2()(X,C,S))
-    E2 = encoding.aggregate()(A2, X, C)
+    A2 = F.softmax(encoding.functions.scaledL2(X,C,S))
+    E2 = encoding.functions.aggregate(A2, X, C)
 
     print('E1', E1)
     print('E2', E2)
     print('Testing assign(): {}'.format((E1-E2).norm(2).data[0] < EPS))
 
 def test_residual():
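The assign test above checks that two routes to the encoding agree: aggregating precomputed residuals (aggregateP) versus aggregating directly from X and C (aggregate). A self-contained numeric check of the underlying identity, E_k = sum_i a_ik * (x_i - c_k) = sum_i a_ik * x_i - (sum_i a_ik) * c_k, under the same shape assumptions as above:

```python
import torch
import torch.nn.functional as F

B, N, K, D = 2, 3, 4, 5
X = torch.DoubleTensor(B, N, D).uniform_(-0.5, 0.5)
C = torch.DoubleTensor(K, D).uniform_(-0.5, 0.5)
S = torch.DoubleTensor(K).uniform_(-0.5, 0.5)

R = X.unsqueeze(2) - C.view(1, 1, K, D)                    # residuals (B, N, K, D)
A = F.softmax(S.view(1, 1, K) * R.pow(2).sum(-1), dim=-1)  # assignments (B, N, K)

E1 = (A.unsqueeze(-1) * R).sum(1)                          # route 1: from residuals
E2 = (A.unsqueeze(-1) * X.unsqueeze(2)).sum(1) - A.sum(1).unsqueeze(-1) * C
print((E1 - E2).norm(2).item() < 1e-6)                     # True
```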
@@ -77,17 +80,19 @@ def test_residual():
     C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (X, C)
-    test = gradcheck(encoding.residual(), input, eps=1e-6, atol=1e-4)
+    test = gradcheck(encoding.functions.residual, input, eps=1e-6, atol=1e-4)
     print('Testing residual(): {}'.format(test))
 
+"""
 def test_square_squeeze():
     B,N,K,D = 2,3,4,5
     R = Variable(torch.cuda.DoubleTensor(B,N,K,D).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (R,)
-    test = gradcheck(encoding.square_squeeze(), input, eps=1e-6, atol=1e-4)
+    test = gradcheck(encoding.functions.square_squeeze(), input, eps=1e-6, atol=1e-4)
     print('Testing square_squeeze(): {}'.format(test))
+"""
 
 def test_encoding():
@@ -95,7 +100,7 @@ def test_encoding():
     X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (X,)
-    layer = encoding.Encoding(C,K).double().cuda()
+    layer = encoding.nn.Encoding(C,K).double().cuda()
     test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
     print('Testing encoding(): {}'.format(test))
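A hedged usage sketch for the layer under test; the channel/codeword values and the output shape are assumptions inferred from the aggregate tests above, not documented behavior:

```python
import torch
import encoding  # PyTorch-Encoding package

B, C, H, W, K = 2, 16, 8, 8, 32
layer = encoding.nn.Encoding(C, K).double().cuda()      # C channels, K codewords
x = torch.DoubleTensor(B, C, H, W).uniform_(-0.5, 0.5).cuda()
e = layer(x)
print(e.shape)  # expected (B, K, C): one aggregated residual vector per codeword
```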
@@ -105,7 +110,7 @@ def test_encodingP():
     X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (X,)
-    layer = encoding.EncodingP(C,K).double().cuda()
+    layer = encoding.nn.EncodingP(C,K).double().cuda()
     test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
     print('Testing encodingP(): {}'.format(test))
@@ -115,18 +120,42 @@ def test_sum_square():
     X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5),
         requires_grad=True)
     input = (X,)
-    test = gradcheck(encoding.sum_square(), input, eps=1e-6, atol=1e-4)
+    test = gradcheck(encoding.functions.sum_square, input, eps=1e-6, atol=1e-4)
     print('Testing sum_square(): {}'.format(test))
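Assuming `sum_square` returns the per-channel sum and sum of squares of an NCHW batch (the pair of statistics a synchronized BatchNorm would reduce across GPUs), a pure-PyTorch reference might look like:

```python
import torch

def sum_square_ref(x):
    # x: (B, C, H, W) -> per-channel sum and sum of squares, each of shape (C,)
    xsum = x.sum(0).sum(-1).sum(-1)
    xsqsum = x.pow(2).sum(0).sum(-1).sum(-1)
    return xsum, xsqsum
```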
+def test_dilated_densenet():
+    net = encoding.dilated.densenet161(True).cuda().eval()
+    print(net)
+    net2 = models.densenet161(True).cuda().eval()
+    x = Variable(torch.Tensor(1,3,224,224).uniform_(-0.5,0.5)).cuda()
+    y = net.features(x)
+    y2 = net2.features(x)
+    print(y[0][0])
+    print(y2[0][0])
+
+def test_dilated_avgpool():
+    X = Variable(torch.cuda.FloatTensor(1,3,75,75).uniform_(-0.5,0.5))
+    input = (X,)
+    layer = encoding.nn.DilatedAvgPool2d(kernel_size=2, stride=1, padding=0, dilation=2)
+    test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
+    print('Testing dilatedavgpool2d(): {}'.format(test))
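`F.avg_pool2d` has no `dilation` argument, which is presumably why the package ships its own layer. A pure-PyTorch reference of the intended operation via `F.unfold`; again my reading of the semantics, not the package's kernel:

```python
import torch
import torch.nn.functional as F

def dilated_avg_pool2d_ref(x, kernel_size, stride=1, padding=0, dilation=1):
    # average over k*k windows whose taps are spread `dilation` pixels apart
    B, C, H, W = x.shape
    cols = F.unfold(x, kernel_size, dilation=dilation,
                    padding=padding, stride=stride)          # (B, C*k*k, L)
    cols = cols.view(B, C, kernel_size * kernel_size, -1)
    Ho = (H + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
    Wo = (W + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
    return cols.mean(dim=2).view(B, C, Ho, Wo)

x = torch.randn(1, 3, 75, 75)
print(dilated_avg_pool2d_ref(x, 2, stride=1, padding=0, dilation=2).shape)
# torch.Size([1, 3, 73, 73])
```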
 if __name__ == '__main__':
     test_aggregateP()
     test_scaledL2()
     test_encoding()
     test_aggregate()
     test_residual()
-    #test_assign()
-    test_square_squeeze()
+    #test_square_squeeze()
     test_encodingP()
     test_sum_square()
+    test_assign()
+    test_dilated_avgpool()
+    """
+    test_dilated_densenet()
+    """