OpenDAS / torch-spline-conv · Commits

Commit 9c208e8e authored Mar 06, 2018 by Jan Eric Lenssen

    tests and small fixes

parent 7761cb1d
Showing 6 changed files with 103 additions and 30 deletions
    edgewise_spline_weighting_gpu_test.py   +3  -0
    spline_conv_gpu.py                      +37 -25
    spline_conv_test.py                     +56 -5
    spline_cubic_gpu_test.py                +2  -0
    spline_linear_gpu_test.py               +3  -0
    spline_quadratic_gpu_test.py            +2  -0
edgewise_spline_weighting_gpu_test.py  (View file @ 9c208e8e)
+'''
 import unittest
 import torch
 from torch.autograd import Variable, gradcheck
 from numpy.testing import assert_equal
 from .spline import spline
 ...
@@ -52,3 +54,4 @@ class EdgewiseSplineWeightingGPUTest(unittest.TestCase):
         op = EdgewiseSplineWeightingGPU(amount, index)
         test = gradcheck(op, (input, weight), eps=1e-6, atol=1e-4)
         self.assertTrue(test)
+'''
\ No newline at end of file
spline_conv_gpu.py  (View file @ 9c208e8e)
 ...
@@ -144,19 +144,15 @@ const ${Dtype}* amount, const long* index, int num_threads) {
       // Calculate B-spline basis tensor product gradient
       adj_g += g * f * w;
     }
-    atomicAdd(&(grad_amount[e_idx, k_idx]), adj_g);
+    atomicAdd(&(grad_amount[e_idx*${k_max} + k_idx]), adj_g);
   }
 }
 }
 '''
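The hunk above fixes an indexing bug: in C/CUDA, `grad_amount[e_idx, k_idx]` invokes the comma operator, so it actually reads `grad_amount[k_idx]` and silently drops the edge index. The replacement flattens the two-dimensional (edge, corner) index row-major. A minimal sketch of that flattening, in Python for illustration (not part of the commit):

    # Element [e_idx][k_idx] of a (num_edges, k_max) buffer lives at
    # offset e_idx * k_max + k_idx in flat, row-major memory.
    def flat_index(e_idx, k_idx, k_max):
        return e_idx * k_max + k_idx

    # With k_max = 4 corners per edge, edge 2 / corner 3 maps to offset 11.
    assert flat_index(2, 3, k_max=4) == 11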
-def get_weighting_forward_kernel(M_in, M_out, k_max, bt_to_adj=False):
+def get_weighting_forward_kernel(M_in, M_out, k_max):
     cuda_tensor = torch.FloatTensor([1]).cuda()
-    if bt_to_adj:
-        kernel = _edgewise_spline_weighting_forward_kernel
-    else:
-        kernel = _edgewise_spline_weighting_forward_kernel
+    kernel = _edgewise_spline_weighting_forward_kernel
     with torch.cuda.device_of(cuda_tensor):
         f_fw = load_kernel(
 ...
@@ -169,12 +165,11 @@ def get_weighting_forward_kernel(M_in, M_out, k_max, bt_to_adj=False):
     return f_fw

-def get_weighting_backward_kernel(M_in, M_out, k_max, K, bt_to_adj=False):
+def get_weighting_backward_kernel(M_in, M_out, k_max, K, bp_to_adj=False):
     cuda_tensor = torch.FloatTensor([1]).cuda()
-    if bt_to_adj:
+    if bp_to_adj:
         kernel = _edgewise_spline_weighting_backward_kernel_bp2adj
     else:
         kernel = _edgewise_spline_weighting_backward_kernel
     with torch.cuda.device_of(cuda_tensor):
         f_bw = load_kernel(
 ...
@@ -199,7 +194,7 @@ const long* kernel_size, const long* is_open_spline, int num_threads) {
     const int e_idx = idx / ${k_max};
     int k_idx = idx % ${k_max};
-    int K = 1.0;
+    int K = ${K};
     int k_idx_mod;
     int bot;
     int top;
 ...
@@ -209,6 +204,7 @@ const long* kernel_size, const long* is_open_spline, int num_threads) {
     long i = 0;
     for (int d_idx = 0; d_idx < ${dim}; d_idx++) {
+      K /= kernel_size[d_idx];
       k_idx_mod = k_idx % 2;
       k_idx >>= 1;
 ...
@@ -224,7 +220,6 @@ const long* kernel_size, const long* is_open_spline, int num_threads) {
       top = (bot + 1) % kernel_size[d_idx];
       bot %= kernel_size[d_idx];
       i += ((1 - k_idx_mod) * bot + k_idx_mod * top) * K;
-      K *= kernel_size[d_idx];
     }
     amount[idx] = a;
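Together, the hunks above rework how the flat kernel-weight index `i` is accumulated: `K` now starts at the total number of kernel weights (`${K}`) and is divided by `kernel_size[d_idx]` before use, so it holds the row-major stride of the current dimension, with dimension 0 most significant. Previously `K` started at 1 and was multiplied after use, making dimension 0 the least significant digit. A sketch of the new scheme (illustrative Python; `digits` stands in for the per-dimension knot indices the kernel derives from `bot`/`top`):

    def flat_kernel_index(digits, kernel_size):
        K = 1
        for size in kernel_size:
            K *= size                # total number of kernel weights (${K})
        i = 0
        for d_idx, size in enumerate(kernel_size):
            K //= size               # row-major stride of dimension d_idx
            i += digits[d_idx] * K
        return i

    # kernel_size = [3, 4]: per-dimension index [2, 1] -> 2 * 4 + 1 = 9
    assert flat_kernel_index([2, 1], [3, 4]) == 9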
 ...
@@ -360,13 +355,13 @@ int num_threads) {
       grad_out += grad_amount[a_idx]*amount[a_idx]/residual;
     }
-    grad_adj[e_idx* ${dim} + d_idx] = grad_out;
+    grad_adj[e_idx*${dim} + d_idx] = grad_out;
   }
 }
 '''
-def get_basis_kernel(k_max, K, dim, degree, bt_to_adj=False):
+def get_basis_kernel(k_max, K, dim, degree):
     if degree == 3:
         _spline_kernel = _spline_kernel_cubic
     elif degree == 2:
 ...
@@ -388,9 +383,9 @@ def get_basis_kernel(k_max, K, dim, degree, bt_to_adj=False):

 def get_basis_backward_kernel(k_max, K, dim, degree):
     if degree == 3:
-        _spline_kernel = _spline_kernel_cubic
+        raise NotImplementedError
     elif degree == 2:
-        _spline_kernel = _spline_kernel_quadratic
+        raise NotImplementedError
     else:
         _spline_kernel = _spline_kernel_linear_backward
 ...
@@ -423,15 +418,20 @@ class SplineConvGPU(Function):
     def forward(self, input, weight, adj_values):
         assert input.is_cuda and weight.is_cuda
         self.K, self.M_in, self.M_out = weight.size()

         # Compute B-spline basis tensor products
         adj_values = adj_values.unsqueeze(1) if len(adj_values.size()) < 2 \
             else adj_values
+        if self.bp_to_adj:
+            self.save_for_backward(input, weight, adj_values)
+            #adj_values = torch.clamp(adj_values,min=0.0,max=1.0)
+        else:
+            self.save_for_backward(input, weight)
         num_edges, dim = adj_values.size()
         k_max = 2**dim
         amount = adj_values.new(num_edges, k_max)
         index = adj_values.new(num_edges, k_max).long()
         num_threads = amount.numel()
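For context on `k_max = 2**dim` above: with degree-1 (linear) B-splines, each pseudo-coordinate falls between two knots per dimension, so its tensor-product basis touches every low/high corner combination of the surrounding kernel cell, i.e. `2**dim` weights per edge. This matches `k_max = (degree + 1)**dim` in the tests. A small illustration (not from the commit):

    from itertools import product

    # Enumerate the low(0)/high(1) knot choices of a dim-dimensional cell;
    # each combination selects one kernel weight an edge contributes to.
    def corner_offsets(dim):
        return list(product((0, 1), repeat=dim))

    assert len(corner_offsets(2)) == 4   # k_max = (degree + 1) ** dim, degree = 1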
 ...
@@ -445,7 +445,8 @@ class SplineConvGPU(Function):
                   amount.data_ptr(),
                   index.data_ptr(),
                   self.kernel_size.data_ptr(),
                   self.is_open_spline.data_ptr(), num_threads
               ],
               stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
 ...
@@ -462,14 +463,12 @@ class SplineConvGPU(Function):
                   weight.data_ptr(),
                   output.data_ptr(),
                   amount.data_ptr(),
                   index.data_ptr(), num_threads
               ],
               stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
-        if self.bp_to_adj:
-            self.save_for_backward(input, weight, adj_values)
-        else:
-            self.save_for_backward(input, weight)
         self.amount = amount
         self.index = index
 ...
@@ -477,16 +476,19 @@ class SplineConvGPU(Function):
         return output

     def backward(self, grad_output):
+        print('grad_output:', grad_output.min(), grad_output.max())
         grad_input = grad_output.new(grad_output.size(0), self.M_in).fill_(0)
         grad_weight = grad_output.new(self.K, self.M_in, self.M_out).fill_(0)
         num_threads = grad_output.numel()

         if self.bp_to_adj:
             input, weight, adj_values = self.saved_tensors
+            #adj_values = torch.clamp(adj_values,min=0.0,max=1.0)
             amount = self.amount
             index = self.index
             grad_amount = grad_output.new(amount.size(0),
                                           amount.size(1)).fill_(0)
             with torch.cuda.device_of(grad_output):
                 self.f_weighting_bw(
                     block=(cuda_num_threads, 1, 1),
 ...
@@ -505,6 +507,7 @@ class SplineConvGPU(Function):
             grad_adj = grad_amount.new(grad_amount.size(0),
                                        self.kernel_size.size(0)).fill_(0)
             num_threads = grad_adj.numel()
             with torch.cuda.device_of(grad_amount):
 ...
@@ -522,12 +525,19 @@ class SplineConvGPU(Function):
                     ],
                     stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
-            return grad_input, grad_weight, None
+            #print('grad_input:',grad_input.min(), grad_input.max())
+            #print('grad_weight:',grad_weight[:,:,-1].min(), grad_weight[:,:,-1].max())
+            #print('grad_amount:',grad_amount.min(), grad_amount.max())
+            #print('grad_adj:',grad_adj.min(), grad_adj.max())
+            return grad_input, grad_weight, grad_adj
         else:
             input, weight = self.saved_tensors
             amount = self.amount
             index = self.index
+            grad_amount = grad_output.new(amount.size(0), amount.size(1)).fill_(0)
             with torch.cuda.device_of(grad_output):
                 self.f_weighting_bw(
                     block=(cuda_num_threads, 1, 1),
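The `bp_to_adj` branch above now returns `grad_adj` in the third slot instead of `None`. In the old-style `torch.autograd.Function` API used in this file, `backward` must return one gradient per `forward` input, in the same order, so this change is what makes `adj_values` trainable. A minimal sketch of that contract (assuming the pre-0.4 Function API, as here; names are illustrative):

    from torch.autograd import Function

    class Mul(Function):
        def forward(self, x, y):                # two forward inputs ...
            self.save_for_backward(x, y)
            return x * y

        def backward(self, grad_out):           # ... two returned gradients, same
            x, y = self.saved_tensors           # order; returning None in a slot
            return grad_out * y, grad_out * x   # blocks gradient flow to that input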
 ...
@@ -536,6 +546,7 @@ class SplineConvGPU(Function):
                     grad_output.data_ptr(),
                     grad_input.data_ptr(),
                     grad_weight.data_ptr(),
+                    grad_amount.data_ptr(),
                     input.data_ptr(),
                     weight.data_ptr(),
                     amount.data_ptr(),
 ...
@@ -543,4 +554,5 @@ class SplineConvGPU(Function):
                     ],
                     stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
         return grad_input, grad_weight, None
spline_conv_test.py  (View file @ 9c208e8e)
 ...
@@ -2,10 +2,12 @@ from __future__ import division
 import unittest
 import torch
-from torch.autograd import Variable
+from torch.autograd import Variable, gradcheck
 from numpy.testing import assert_almost_equal
 from .spline_conv import spline_conv
+from .spline_conv_gpu import get_basis_kernel, get_basis_backward_kernel, \
+    get_weighting_forward_kernel, get_weighting_backward_kernel, SplineConvGPU

 class SplineConvTest(unittest.TestCase):
 ...
@@ -14,7 +16,9 @@ class SplineConvTest(unittest.TestCase):
         edges = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])
         values = [[0.25, 0.125], [0.25, 0.375], [0.75, 0.625], [0.75, 0.875]]
         values = torch.FloatTensor(values)
-        adj = torch.sparse.FloatTensor(edges, values, torch.Size([5, 5, 2]))
+        adj = {'indices': edges.cuda(),
+               'values': Variable(values.cuda()),
+               'size': torch.Size([5, 5, 2])}
         kernel_size = torch.cuda.LongTensor([3, 4])
         is_open_spline = torch.cuda.LongTensor([1, 0])
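The test now passes the adjacency as a plain dict of COO components instead of a `torch.sparse.FloatTensor`, keeping `'values'` wrapped in a `Variable` so gradients can reach the pseudo-coordinates when `bp_to_adj=True` (the keys come from the diff; the motivation is my reading). A sketch of that layout:

    import torch
    from torch.autograd import Variable

    edges = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])  # COO indices, shape (2, E)
    values = torch.rand(4, 2)                               # pseudo-coordinates per edge
    adj = {'indices': edges,
           'values': Variable(values),                      # the differentiable part
           'size': torch.Size([5, 5, 2])}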
 ...
@@ -22,11 +26,26 @@ class SplineConvTest(unittest.TestCase):
         input = torch.FloatTensor([[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]])
         weight = torch.arange(0.5, 0.5 * 27, step=0.5).view(13, 2, 1)
-        adj, input, weight = adj.cuda(), input.cuda(), weight.cuda()
+        input, weight = input.cuda(), weight.cuda()
         input, weight = Variable(input), Variable(weight)

+        K = 12
+        in_features = 2
+        out_features = 1
+        degree = 1
+        dim = 2
+        k_max = (degree + 1)**dim
+        fw_k = get_weighting_forward_kernel(in_features, out_features, k_max)
+        bw_k = get_weighting_backward_kernel(in_features, out_features, k_max, K, True)
+        basis_fw_k = get_basis_kernel(k_max, K, dim, degree)
+        basis_bw_k = get_basis_backward_kernel(k_max, K, dim, degree)
         output = spline_conv(
-            adj, input, weight, kernel_size, is_open_spline, K=12, degree=1)
+            adj, input, weight, kernel_size, is_open_spline, K, fw_k, bw_k,
+            basis_fw_k, basis_bw_k, bp_to_adj=True)

         expected_output = [
             [(12.5 * 9 + 13 * 10 + 266) / 4],
 ...
@@ -35,5 +54,37 @@ class SplineConvTest(unittest.TestCase):
             [12.5 * 5 + 13 * 6],
             [12.5 * 7 + 13 * 8],
         ]
         assert_almost_equal(output.cpu().data.numpy(), expected_output, 1)
+    @unittest.skipIf(not torch.cuda.is_available(), 'no GPU')
+    def test_backward(self):
+        kernel_size = torch.cuda.LongTensor([3, 4])
+        is_open_spline = torch.cuda.LongTensor([1, 0])
+
+        input = torch.randn(4, 2).double().cuda()
+        weight = torch.randn(12, 2, 1).double().cuda()
+        values = torch.randn(4, 2).double().cuda()
+        input = Variable(input, requires_grad=True)
+        weight = Variable(weight, requires_grad=True)
+        values = Variable(values, requires_grad=True)
+
+        K = 12
+        in_features = 2
+        out_features = 1
+        degree = 1
+        dim = 2
+        k_max = (degree + 1)**dim
+        fw_k = get_weighting_forward_kernel(in_features, out_features, k_max)
+        bw_k = get_weighting_backward_kernel(in_features, out_features, k_max,
+                                             K, bp_to_adj=True)
+        basis_fw_k = get_basis_kernel(k_max, K, dim, degree)
+        basis_bw_k = get_basis_backward_kernel(k_max, K, dim, degree)
+
+        op = SplineConvGPU(kernel_size, is_open_spline, K, degree, basis_fw_k,
+                           basis_bw_k, fw_k, bw_k, bp_to_adj=True)
+        test = gradcheck(op, (input, weight, values), eps=1e-6, atol=1e-4)
+        print(test)
+        self.assertTrue(test)
\ No newline at end of file
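The new `test_backward` relies on `gradcheck`, which compares the op's analytic gradients against finite-difference estimates; the inputs are cast to double because the default `eps`/`atol` tolerances are only meaningful in float64. A standalone sketch of the pattern (modern tensor API; the test above uses the older `Variable` form):

    import torch
    from torch.autograd import gradcheck

    x = torch.randn(4, 2, dtype=torch.double, requires_grad=True)
    w = torch.randn(2, 1, dtype=torch.double, requires_grad=True)

    # gradcheck perturbs each input by eps and compares numeric vs. analytic grads.
    assert gradcheck(lambda x, w: (x @ w).sum(), (x, w), eps=1e-6, atol=1e-4)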
spline_cubic_gpu_test.py  (View file @ 9c208e8e)
+'''
 import unittest
 import torch
 ...
@@ -62,3 +63,4 @@ class SplineQuadraticGPUTest(unittest.TestCase):
         assert_almost_equal(a1.cpu().numpy(), a2, 4)
         assert_equal(i1.cpu().numpy(), i2)
+'''
\ No newline at end of file
spline_linear_gpu_test.py  (View file @ 9c208e8e)
+'''
 import unittest
 import torch
 ...
@@ -44,3 +45,5 @@ class SplineLinearGPUTest(unittest.TestCase):
         assert_almost_equal(a1.cpu().numpy(), a2, 2)
         assert_equal(i1.cpu().numpy(), i2)
+'''
\ No newline at end of file
spline_quadratic_gpu_test.py  (View file @ 9c208e8e)
+'''
 import unittest
 import torch
 ...
@@ -49,3 +50,4 @@ class SplineQuadraticGPUTest(unittest.TestCase):
         assert_almost_equal(a1.cpu().numpy(), a2, 2)
         assert_equal(i1.cpu().numpy(), i2)
+'''
\ No newline at end of file