"tests/vscode:/vscode.git/clone" did not exist on "6459a688ae15d797dd4d0586f2f8ad2e46d58145"
test.py 4.14 KB
Newer Older
Hang Zhang's avatar
test  
Hang Zhang committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree 
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

import encoding
import torch
import torch.nn.functional as F
from torch.autograd import Variable, gradcheck
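
# Unit tests for the CUDA operators behind the Encoding layer. Each test
# draws small double-precision CUDA inputs and runs torch.autograd.gradcheck,
# which compares analytical gradients against finite differences
# (eps=1e-6, atol=1e-4). The file is written against the Variable-era
# autograd API, so it assumes an early (pre-0.4) PyTorch build with CUDA
# available.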

def test_aggregateP():
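    """Gradcheck for encoding.aggregateP(), which (judging by its inputs)
    aggregates precomputed residuals R of shape (B, N, K, D) with soft
    assignment weights A of shape (B, N, K). Double-precision CUDA tensors
    are used because gradcheck needs float64 accuracy.
    """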
Hang Zhang's avatar
test  
Hang Zhang committed
17
18
19
20
21
22
    B,N,K,D = 2,3,4,5
    A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5), 
        requires_grad=True)
    R = Variable(torch.cuda.DoubleTensor(B,N,K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (A, R)
    test = gradcheck(encoding.aggregateP(), input, eps=1e-6, atol=1e-4)
    print('Testing aggregateP(): {}'.format(test))


def test_aggregate():
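    """Gradcheck for encoding.aggregate(), which aggregates features X
    (B, N, D) against codewords C (K, D) under assignment weights A
    (B, N, K), computing the residuals internally (inferred from its
    argument list).
    """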
    B,N,K,D = 2,3,4,5
    A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5), 
        requires_grad=True)
    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (A, X, C)
    test = gradcheck(encoding.aggregate(), input, eps=1e-6, atol=1e-4)
    print('Testing aggregate(): {}'.format(test))


def test_ScaledL2():
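    """Gradcheck for encoding.ScaledL2(): scaled squared L2 distances
    between features X (B, N, D) and codewords C (K, D) with per-codeword
    smoothing factors S (K,), as suggested by the input shapes.
    """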
    B,N,K,D = 2,3,4,5
    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X, C, S)
    test = gradcheck(encoding.ScaledL2(), input, eps=1e-6, atol=1e-4)
    print('Testing ScaledL2(): {}'.format(test))


def test_assign():
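    """Consistency check rather than a gradcheck: the explicit path
    residual -> assign -> aggregateP should produce the same encoding as
    the fused path ScaledL2 -> softmax -> aggregate. E1 and E2 are printed
    for manual comparison.
    """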
    B,N,K,D = 2,3,4,5
    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5), 
        requires_grad=True)

    R = encoding.residual()(X, C)
    A1 = encoding.assign(R, S)
    E1 = encoding.aggregateP()(A1, R)

    # normalize the assignment weights over the K codewords (dim=2)
    A2 = F.softmax(encoding.ScaledL2()(X,C,S), dim=2)
    E2 = encoding.aggregate()(A2, X, C)

    print('E1', E1)
    print('E2', E2)


def test_residual():
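    """Gradcheck for encoding.residual(), which expands features X
    (B, N, D) and codewords C (K, D) into pairwise residuals of shape
    (B, N, K, D) (inferred from how R is used in test_assign above).
    """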
    B,N,K,D = 2,3,4,5
    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X, C)
    test = gradcheck(encoding.residual(), input, eps=1e-6, atol=1e-4)
    print('Testing residual(): {}'.format(test))


def test_square_squeeze():
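    """Gradcheck for encoding.square_squeeze() on residuals R of shape
    (B, N, K, D); presumably a squared-norm reduction over the feature
    dimension D, though only its gradients are exercised here.
    """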
    B,N,K,D = 2,3,4,5
    R = Variable(torch.cuda.DoubleTensor(B,N,K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (R,)
    test = gradcheck(encoding.square_squeeze(), input, eps=1e-6, atol=1e-4)
    print('Testing square_squeeze(): {}'.format(test))


def test_encoding():
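    """End-to-end gradcheck of the Encoding layer with C input channels
    and K codewords on a (B, C, H, W) input.
    """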
    B,C,H,W,K = 2,3,4,5,6
    X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X,)
    layer = encoding.Encoding(C,K).double().cuda()
    test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
    print('Testing encoding(): {}'.format(test))
    

def test_encodingP():
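    """Same end-to-end gradcheck for the EncodingP variant of the layer.
    """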
    B,C,H,W,K = 2,3,4,5,6
    X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X,)
    layer = encoding.EncodingP(C,K).double().cuda()
    test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
    print('Testing encodingP(): {}'.format(test))


def test_sum_square():
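    """Gradcheck for encoding.sum_square() on a (B, C, H, W) input;
    presumably the per-channel sum and sum of squares used by the
    synchronized BatchNorm, though only gradients are verified here.
    """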
    B,C,H,W = 2,3,4,5
    X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X,)
    test = gradcheck(encoding.sum_square(), input, eps=1e-6, atol=1e-4)
    print('Testing sum_square(): {}'.format(test))


if __name__ == '__main__':
    test_aggregateP()
    test_ScaledL2()
    test_encoding() 
    test_aggregate()
    test_residual()
    #test_assign()
    test_square_squeeze()
    test_encodingP()
    test_sum_square()