##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree 
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

import encoding
import torch
import torch.nn.functional as F
from torch.autograd import Variable, gradcheck

import torchvision.models as models

EPS = 1e-6

def test_aggregateP():
    """Gradient-check encoding.functions.aggregateP on random CUDA inputs.

    A is a (B, N, K) assignment tensor and R a (B, N, K, D) residual
    tensor; gradcheck verifies analytical vs. numerical gradients.
    Prints True on success.
    """
    B, N, K, D = 2, 3, 4, 5
    A = Variable(torch.cuda.DoubleTensor(B, N, K).uniform_(-0.5, 0.5),
        requires_grad=True)
    R = Variable(torch.cuda.DoubleTensor(B, N, K, D).uniform_(-0.5, 0.5),
        requires_grad=True)
    input = (A, R)
    test = gradcheck(encoding.functions.aggregateP, input, eps=1e-6, atol=1e-4)
    # Bug fix: the message previously said 'aggregate()', which is a
    # different function tested by test_aggregate().
    print('Testing aggregateP(): {}'.format(test))


def test_aggregate():
    """Gradient-check encoding.functions.aggregate on random CUDA inputs."""
    batch, num, codewords, dim = 2, 3, 4, 5
    assign = Variable(
        torch.cuda.DoubleTensor(batch, num, codewords).uniform_(-0.5, 0.5),
        requires_grad=True)
    feats = Variable(
        torch.cuda.DoubleTensor(batch, num, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    codes = Variable(
        torch.cuda.DoubleTensor(codewords, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    test = gradcheck(encoding.functions.aggregate, (assign, feats, codes),
                     eps=1e-6, atol=1e-4)
    print('Testing aggregate(): {}'.format(test))


def test_scaledL2():
    """Gradient-check encoding.functions.scaledL2 on random CUDA inputs."""
    batch, num, codewords, dim = 2, 3, 4, 5
    feats = Variable(
        torch.cuda.DoubleTensor(batch, num, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    codes = Variable(
        torch.cuda.DoubleTensor(codewords, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    scale = Variable(
        torch.cuda.DoubleTensor(codewords).uniform_(-0.5, 0.5),
        requires_grad=True)
    test = gradcheck(encoding.functions.scaledL2, (feats, codes, scale),
                     eps=1e-6, atol=1e-4)
    print('Testing scaledL2(): {}'.format(test))


def test_assign():
    """Check assign()+aggregateP() against softmax(scaledL2())+aggregate().

    Both paths should produce the same encoded output; prints True when
    the L2 norm of their difference is below the module-level EPS.
    """
    batch, num, codewords, dim = 2, 3, 4, 5
    feats = Variable(
        torch.cuda.DoubleTensor(batch, num, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    codes = Variable(
        torch.cuda.DoubleTensor(codewords, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    scale = Variable(
        torch.cuda.DoubleTensor(codewords).uniform_(-0.5, 0.5),
        requires_grad=True)

    # Path 1: explicit residuals, assignment, then pairwise aggregation.
    residuals = encoding.functions.residual(feats, codes)
    assign1 = encoding.functions.assign(residuals, scale)
    enc1 = encoding.functions.aggregateP(assign1, residuals)

    # Path 2: fused scaled-L2 distances + softmax, then aggregation.
    assign2 = F.softmax(encoding.functions.scaledL2(feats, codes, scale))
    enc2 = encoding.functions.aggregate(assign2, feats, codes)

    print('Testing assign(): {}'.format((enc1 - enc2).norm(2).data[0] < EPS))


def test_residual():
    """Gradient-check encoding.functions.residual on random CUDA inputs."""
    batch, num, codewords, dim = 2, 3, 4, 5
    feats = Variable(
        torch.cuda.DoubleTensor(batch, num, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    codes = Variable(
        torch.cuda.DoubleTensor(codewords, dim).uniform_(-0.5, 0.5),
        requires_grad=True)
    test = gradcheck(encoding.functions.residual, (feats, codes),
                     eps=1e-6, atol=1e-4)
    print('Testing residual(): {}'.format(test))


# Disabled test kept for reference (see __main__, where its call is also
# commented out).
"""
def test_square_squeeze():
    B, N, K, D = 2, 3, 4, 5
    R = Variable(torch.cuda.DoubleTensor(B, N, K, D).uniform_(-0.5, 0.5),
        requires_grad=True)
    input = (R,)
    # Note: pass the callable itself to gradcheck; the original version
    # wrote square_squeeze() with parentheses, which would hand gradcheck
    # the call's result instead of a function to check.
    test = gradcheck(encoding.functions.square_squeeze, input,
                     eps=1e-6, atol=1e-4)
    print('Testing square_squeeze(): {}'.format(test))
"""


def test_encoding():
    """Gradient-check the encoding.nn.Encoding layer on a random CUDA image."""
    batch, channels, height, width, codewords = 2, 3, 4, 5, 6
    feats = Variable(
        torch.cuda.DoubleTensor(batch, channels, height, width).uniform_(-0.5, 0.5),
        requires_grad=True)
    # Double precision keeps gradcheck's numerical gradients accurate.
    layer = encoding.nn.Encoding(channels, codewords).double().cuda()
    test = gradcheck(layer, (feats,), eps=1e-6, atol=1e-4)
    print('Testing encoding(): {}'.format(test))
    

def test_encodingP():
    """Gradient-check the encoding.nn.EncodingP layer on a random CUDA image."""
    batch, channels, height, width, codewords = 2, 3, 4, 5, 6
    feats = Variable(
        torch.cuda.DoubleTensor(batch, channels, height, width).uniform_(-0.5, 0.5),
        requires_grad=True)
    # Double precision keeps gradcheck's numerical gradients accurate.
    layer = encoding.nn.EncodingP(channels, codewords).double().cuda()
    test = gradcheck(layer, (feats,), eps=1e-6, atol=1e-4)
    print('Testing encodingP(): {}'.format(test))


def test_sum_square():
    """Gradient-check encoding.functions.sum_square on a random CUDA image."""
    batch, channels, height, width = 2, 3, 4, 5
    feats = Variable(
        torch.cuda.DoubleTensor(batch, channels, height, width).uniform_(-0.5, 0.5),
        requires_grad=True)
    test = gradcheck(encoding.functions.sum_square, (feats,),
                     eps=1e-6, atol=1e-4)
    print('Testing sum_square(): {}'.format(test))


def test_dilated_densenet():
    """Print dilated densenet161 features next to torchvision's for manual comparison.

    Downloads pretrained weights for both networks; no assertion is made —
    the feature maps are printed for eyeball inspection only.
    """
    net = encoding.dilated.densenet161(True).cuda().eval()
    print(net)
    reference = models.densenet161(True).cuda().eval()

    x = Variable(torch.Tensor(1, 3, 224, 224).uniform_(-0.5, 0.5)).cuda()
    feats_dilated = net.features(x)
    feats_reference = reference.features(x)

    print(feats_dilated[0][0])
    print(feats_reference[0][0])


def test_dilated_avgpool():
    """Gradient-check encoding.nn.DilatedAvgPool2d on a random CUDA input."""
    feats = Variable(torch.cuda.FloatTensor(1, 3, 75, 75).uniform_(-0.5, 0.5))
    layer = encoding.nn.DilatedAvgPool2d(kernel_size=2, stride=1,
                                         padding=0, dilation=2)
    test = gradcheck(layer, (feats,), eps=1e-6, atol=1e-4)
    print('Testing dilatedavgpool2d(): {}'.format(test))


if __name__ == '__main__':
    # Run every enabled test; each prints its own pass/fail line.
    test_aggregateP()
    test_scaledL2()
    test_encoding()
    test_aggregate()
    test_residual()
    #test_square_squeeze()
    test_encodingP()
    test_sum_square()
    test_assign()
    test_dilated_avgpool()
    # Disabled: downloads pretrained weights and only prints feature maps
    # for manual inspection.
    """
    test_dilated_densenet()
    """