Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
SparseConvNet
Commits
54c58b5f
Commit
54c58b5f
authored
Mar 08, 2019
by
Benjamin Thomas Graham
Browse files
minor
parent
dcd1428d
Changes
8
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
239 additions
and
45 deletions
+239
-45
examples/nyu2/data/prepare_data.py
examples/nyu2/data/prepare_data.py
+122
-0
sparseconvnet/SCN/CUDA/AveragePooling.cu
sparseconvnet/SCN/CUDA/AveragePooling.cu
+20
-19
sparseconvnet/SCN/Metadata/Metadata.cpp
sparseconvnet/SCN/Metadata/Metadata.cpp
+8
-13
sparseconvnet/SCN/Metadata/Metadata.h
sparseconvnet/SCN/Metadata/Metadata.h
+6
-1
sparseconvnet/networkArchitectures.py
sparseconvnet/networkArchitectures.py
+6
-8
sparseconvnet/sparsify.py
sparseconvnet/sparsify.py
+71
-3
sparseconvnet/tables.py
sparseconvnet/tables.py
+1
-1
sparseconvnet/utils.py
sparseconvnet/utils.py
+5
-0
No files found.
examples/nyu2/data/prepare_data.py
0 → 100644
View file @
54c58b5f
import
numpy
as
np
import
torch
import
glob
,
math
,
os
import
scipy.io
import
h5py
import
pickle
# The 40 NYUv2 class names (Silberman et al. 40-class labeling), in label order.
# Note: 'refridgerator' is the dataset's own (misspelled) label string; keep as-is.
classes = [
    'wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
    'window', 'bookshelf', 'picture', 'counter', 'blinds', 'desk', 'shelves',
    'curtain', 'dresser', 'pillow', 'mirror', 'floor mat', 'clothes',
    'ceiling', 'books', 'refridgerator', 'television', 'paper', 'towel',
    'shower curtain', 'box', 'whiteboard', 'person', 'night stand', 'toilet',
    'sink', 'lamp', 'bathtub', 'bag', 'otherstructure', 'otherfurniture',
    'otherprop',
]
# Map from the raw NYUv2 894-category labels (0-indexed position) to the
# 1-indexed 40-class Silberman labeling; 40 is the catch-all 'otherprop' bucket.
corresponding_classes_in_Silberman_labeling = [
    40, 40, 3, 22, 5, 40, 12, 38, 40, 40,
    2, 39, 40, 40, 26, 40, 24, 40, 7, 40,
    1, 40, 40, 34, 38, 29, 40, 8, 40, 40,
    40, 40, 38, 40, 40, 14, 40, 38, 40, 40,
    40, 15, 39, 40, 30, 40, 40, 39, 40, 39,
    38, 40, 38, 40, 37, 40, 38, 38, 9, 40,
    40, 38, 40, 11, 38, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 38, 13,
    40, 40, 6, 40, 23, 40, 39, 10, 16, 40,
    40, 40, 40, 38, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 38, 40, 39, 40, 40, 40, 40,
    39, 38, 40, 40, 40, 40, 40, 40, 18, 40,
    40, 19, 28, 33, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 38, 27, 36, 40, 40, 40, 40,
    21, 40, 20, 35, 40, 40, 40, 40, 40, 40,
    40, 40, 38, 40, 40, 40, 4, 32, 40, 40,
    39, 40, 39, 40, 40, 40, 40, 40, 17, 40,
    40, 25, 40, 39, 40, 40, 40, 40, 40, 40,
    40, 40, 39, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 39, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 39, 38, 38, 40, 40, 39, 40,
    39, 40, 38, 39, 38, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 39, 40, 38, 40, 40,
    38, 38, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 38, 40, 40, 40, 40,
    40, 39, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 39, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    39, 40, 40, 40, 38, 40, 40, 39, 40, 40,
    38, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 39, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    31, 40, 40, 40, 40, 40, 40, 40, 38, 40,
    40, 38, 39, 39, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 38, 40, 39, 40, 40, 39, 40,
    40, 40, 38, 40, 40, 40, 40, 40, 40, 40,
    40, 38, 39, 40, 40, 40, 40, 40, 40, 38,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 38, 39, 40, 40, 40, 40, 40, 40, 40,
    39, 40, 40, 40, 40, 40, 40, 38, 40, 40,
    40, 38, 40, 39, 40, 40, 40, 39, 39, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 39,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 39, 39, 40, 40, 39, 39, 40, 40,
    40, 40, 38, 40, 40, 38, 39, 39, 40, 39,
    40, 39, 38, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 39, 40, 38, 40, 39, 40,
    40, 40, 40, 40, 39, 39, 40, 40, 40, 40,
    40, 40, 39, 39, 40, 40, 38, 39, 39, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 39, 39,
    40, 40, 40, 40, 39, 40, 40, 40, 40, 40,
    39, 40, 40, 39, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 39,
    38, 40, 40, 40, 40, 40, 40, 40, 39, 38,
    39, 40, 38, 39, 40, 39, 40, 39, 40, 40,
    40, 40, 40, 40, 40, 40, 38, 40, 40, 40,
    40, 40, 38, 40, 40, 39, 40, 40, 40, 39,
    40, 38, 40, 40, 40, 40, 40, 40, 40, 40,
    38, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 39, 38, 40,
    40, 38, 40, 40, 38, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 39, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 38, 40, 40, 38, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 38, 38, 38, 40,
    40, 40, 38, 40, 40, 40, 38, 38, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 38, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 38,
    40, 38, 39, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 39, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 39,
    40, 39, 40, 40, 40, 40, 38, 38, 40, 40,
    40, 38, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 39, 40, 40, 39,
    40, 40, 39, 39, 40, 40, 40, 40, 40, 40,
    40, 40, 39, 39, 39, 40, 40, 40, 40, 39,
    40, 40, 40, 40, 40, 40, 40, 40, 39, 40,
    40, 40, 40, 40, 39, 40, 40, 40, 40, 40,
    40, 40, 40, 40, 40, 40, 40, 40, 40, 38,
    40, 40, 40, 40, 40, 40, 40, 39, 40, 40,
    38, 40, 39, 40, 40, 40, 40, 38, 40, 40,
    40, 40, 40, 38, 40, 40, 40, 40, 40, 40,
    40, 39, 40, 40, 40, 40, 40, 40, 40, 40,
    40, 39, 40, 40,
]
# Sanity check: both lookup tables must describe the same label space.
print(len(classes), len(corresponding_classes_in_Silberman_labeling))

# 'testNdxs' in splits.mat is 1-indexed (MATLAB); shift to 0-indexed.
split = scipy.io.loadmat('splits.mat')['testNdxs'] - 1  # 0-index
# Build a set once for O(1) membership tests (the original scanned the
# whole numpy array for every one of the 1449 candidate indices).
_test_set = {int(v) for v in split.flatten()}
testIdxs = [x for x in range(1449) if x in _test_set]
trainIdxs = [x for x in range(1449) if x not in _test_set]
print(len(trainIdxs), len(testIdxs))
def _export_samples(h5file, sample_indices, prefix):
    """Convert each NYUv2 sample to sparse point data and pickle it.

    For every index in sample_indices, crops the central region of the
    frame, keeps only labelled pixels, and writes three parallel arrays
    (coords int16, colors uint8, class ids int8) to '<prefix><n>.pickle'.
    Replaces the original copy-pasted train/test loops; also closes each
    output file deterministically (the originals leaked the handle) and
    avoids reusing the outer loop variable for the pixel loops.
    """
    for out_idx, sample in enumerate(sample_indices):
        tc = h5file.get('images')[sample]        # color frame for this sample
        td = h5file.get('depths')[sample] * 100  # depth, scaled by 100
        td -= td.mean()                          # zero-center depth per sample
        print(td.std())
        # Raw labels are shifted by -1 so that "unlabelled" becomes -1.
        gt = np.array(h5file.get('labels')[sample], dtype='int16') - 1
        coords, col, cl = [], [], []
        # Crop to x in [40, 600), y in [45, 470) -- presumably trimming the
        # unlabelled NYUv2 border; TODO confirm against the dataset docs.
        for px in range(40, 600):
            for py in range(45, 470):
                if gt[px, py] >= 0:
                    cl.append(
                        corresponding_classes_in_Silberman_labeling[gt[px, py]] - 1)
                    coords.append([px - 320, py - 240, td[px, py]])
                    col.append([255, tc[0, px, py], tc[1, px, py], tc[2, px, py]])
        coords = np.array(coords, dtype='int16')
        col = np.array(col, dtype='uint8')
        cl = np.array(cl, dtype='int8')
        print(coords.shape, col.shape, cl.shape)
        with open(prefix + str(out_idx) + '.pickle', 'wb') as out:
            pickle.dump([coords, col, cl], out, protocol=pickle.HIGHEST_PROTOCOL)


# One file handle serves both splits (the original reopened the same file).
f = h5py.File('nyu_depth_v2_labeled.mat', 'r')
_export_samples(f, trainIdxs, 'train')
_export_samples(f, testIdxs, 'test')
sparseconvnet/SCN/CUDA/AveragePooling.cu
View file @
54c58b5f
...
@@ -9,23 +9,24 @@
...
@@ -9,23 +9,24 @@
// NTX must be >=2 so r is filled properly
// NTX must be >=2 so r is filled properly
template
<
typename
T
,
Int
NTX
,
Int
NTY
>
template
<
typename
T
,
Int
NTX
,
Int
NTY
>
__global__
void
AveragePooling_fp
(
T
*
input_features
,
T
*
output_features
,
__global__
void
AveragePooling_fp
(
T
*
input_features
,
T
*
output_features
,
Int
nPlanes
,
Int
input_stride
,
Int
nPlanes
,
Int
input_stride
,
Int
output_stride
,
Int
*
rules
,
Int
nHot
,
Int
output_stride
,
Int
*
rules
,
Int
nHot
,
T
alpha
)
{
T
alpha
)
{
__shared__
Int
r
[
NTY
*
2
];
__shared__
Int
r
[
NTY
*
2
];
for
(
Int
n
=
blockIdx
.
x
*
NTY
;
n
<
nHot
;
n
+=
gridDim
.
x
*
NTY
)
{
for
(
Int
n
=
blockIdx
.
x
*
NTY
;
n
<
nHot
;
n
+=
gridDim
.
x
*
NTY
)
{
{
{
Int
i
=
threadIdx
.
x
+
NTX
*
threadIdx
.
y
;
Int
i
=
threadIdx
.
x
+
NTX
*
threadIdx
.
y
;
if
(
i
<
NTY
*
2
and
i
<
2
*
(
nHot
-
n
))
if
(
i
<
NTY
*
2
and
i
<
2
*
(
nHot
-
n
))
r
[
i
]
=
rules
[
2
*
n
+
i
];
r
[
i
]
=
rules
[
2
*
n
+
i
];
}
}
__syncthreads
();
__syncthreads
();
if
(
n
+
threadIdx
.
y
<
nHot
)
{
if
(
n
+
threadIdx
.
y
<
nHot
)
{
Int
i
=
r
[
2
*
threadIdx
.
y
]
*
input_stride
;
Int
i
=
r
[
2
*
threadIdx
.
y
]
*
input_stride
;
Int
o
=
r
[
2
*
threadIdx
.
y
+
1
]
*
output_stride
;
Int
o
=
r
[
2
*
threadIdx
.
y
+
1
]
*
output_stride
;
for
(
Int
plane
=
threadIdx
.
x
;
plane
<
nPlanes
;
plane
+=
NTX
)
for
(
Int
plane
=
threadIdx
.
x
;
plane
<
nPlanes
;
plane
+=
NTX
)
atomicAdd
(
&
output_features
[
o
+
plane
],
output_features
[
o
+
plane
]
+=
alpha
*
input_features
[
i
+
plane
];
alpha
*
input_features
[
i
+
plane
]);
// atomicAdd(&output_features[o + plane],
// alpha * input_features[i + plane]);
}
}
__syncthreads
();
__syncthreads
();
}
}
...
@@ -33,32 +34,32 @@ __global__ void AveragePooling_fp(T *input_features, T *output_features,
...
@@ -33,32 +34,32 @@ __global__ void AveragePooling_fp(T *input_features, T *output_features,
template
<
typename
T
>
template
<
typename
T
>
void
cuda_AveragePooling_ForwardPass
(
T
*
input_features
,
T
*
output_features
,
void
cuda_AveragePooling_ForwardPass
(
T
*
input_features
,
T
*
output_features
,
Int
nPlanes
,
Int
input_stride
,
Int
nPlanes
,
Int
input_stride
,
Int
output_stride
,
RuleBook
_rules
,
Int
output_stride
,
RuleBook
_rules
,
Int
filterVolume
)
{
Int
filterVolume
)
{
RULEBOOKITERATOR
((
AveragePooling_fp
<
T
,
32
,
32
><<<
32
,
dim3
(
32
,
32
)
>>>
(
RULEBOOKITERATOR
((
AveragePooling_fp
<
T
,
32
,
32
><<<
32
,
dim3
(
32
,
32
)
>>>
(
input_features
,
output_features
,
nPlanes
,
input_stride
,
output_stride
,
input_features
,
output_features
,
nPlanes
,
input_stride
,
output_stride
,
rbB
,
nHotB
,
1.0
/
filterVolume
));
rbB
,
nHotB
,
1.0
/
filterVolume
));
,
)
,
)
}
}
template
<
typename
T
,
Int
NTX
,
Int
NTY
>
template
<
typename
T
,
Int
NTX
,
Int
NTY
>
__global__
void
AveragePooling_bp
(
T
*
d_input_features
,
T
*
d_output_features
,
__global__
void
AveragePooling_bp
(
T
*
d_input_features
,
T
*
d_output_features
,
Int
nPlanes
,
Int
input_stride
,
Int
nPlanes
,
Int
input_stride
,
Int
output_stride
,
Int
*
rules
,
Int
nHot
,
Int
output_stride
,
Int
*
rules
,
Int
nHot
,
T
alpha
)
{
T
alpha
)
{
__shared__
Int
r
[
NTY
*
2
];
__shared__
Int
r
[
NTY
*
2
];
for
(
Int
n
=
blockIdx
.
x
*
NTY
;
n
<
nHot
;
n
+=
gridDim
.
x
*
NTY
)
{
for
(
Int
n
=
blockIdx
.
x
*
NTY
;
n
<
nHot
;
n
+=
gridDim
.
x
*
NTY
)
{
{
{
Int
i
=
threadIdx
.
x
+
NTX
*
threadIdx
.
y
;
Int
i
=
threadIdx
.
x
+
NTX
*
threadIdx
.
y
;
if
(
i
<
NTY
*
2
and
i
<
2
*
(
nHot
-
n
))
if
(
i
<
NTY
*
2
and
i
<
2
*
(
nHot
-
n
))
r
[
i
]
=
rules
[
2
*
n
+
i
];
r
[
i
]
=
rules
[
2
*
n
+
i
];
}
}
__syncthreads
();
__syncthreads
();
if
(
n
+
threadIdx
.
y
<
nHot
)
{
if
(
n
+
threadIdx
.
y
<
nHot
)
{
Int
i
=
r
[
2
*
threadIdx
.
y
]
*
input_stride
;
Int
i
=
r
[
2
*
threadIdx
.
y
]
*
input_stride
;
Int
o
=
r
[
2
*
threadIdx
.
y
+
1
]
*
output_stride
;
Int
o
=
r
[
2
*
threadIdx
.
y
+
1
]
*
output_stride
;
for
(
Int
plane
=
threadIdx
.
x
;
plane
<
nPlanes
;
plane
+=
NTX
)
for
(
Int
plane
=
threadIdx
.
x
;
plane
<
nPlanes
;
plane
+=
NTX
)
d_input_features
[
i
+
plane
]
+=
alpha
*
d_output_features
[
o
+
plane
];
d_input_features
[
i
+
plane
]
+=
alpha
*
d_output_features
[
o
+
plane
];
}
}
__syncthreads
();
__syncthreads
();
}
}
...
@@ -66,11 +67,11 @@ __global__ void AveragePooling_bp(T *d_input_features, T *d_output_features,
...
@@ -66,11 +67,11 @@ __global__ void AveragePooling_bp(T *d_input_features, T *d_output_features,
template
<
typename
T
>
template
<
typename
T
>
void
cuda_AveragePooling_BackwardPass
(
T
*
d_input_features
,
T
*
d_output_features
,
void
cuda_AveragePooling_BackwardPass
(
T
*
d_input_features
,
T
*
d_output_features
,
Int
nPlanes
,
Int
input_stride
,
Int
nPlanes
,
Int
input_stride
,
Int
output_stride
,
RuleBook
_rules
,
Int
output_stride
,
RuleBook
_rules
,
Int
filterVolume
)
{
Int
filterVolume
)
{
RULEBOOKITERATOR
((
AveragePooling_bp
<
T
,
32
,
32
><<<
32
,
dim3
(
32
,
32
)
>>>
(
RULEBOOKITERATOR
((
AveragePooling_bp
<
T
,
32
,
32
><<<
32
,
dim3
(
32
,
32
)
>>>
(
d_input_features
,
d_output_features
,
nPlanes
,
input_stride
,
output_stride
,
d_input_features
,
d_output_features
,
nPlanes
,
input_stride
,
output_stride
,
rbB
,
nHotB
,
1.0
/
filterVolume
));
rbB
,
nHotB
,
1.0
/
filterVolume
));
,
)
,
)
}
}
sparseconvnet/SCN/Metadata/Metadata.cpp
View file @
54c58b5f
...
@@ -256,15 +256,14 @@ void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
...
@@ -256,15 +256,14 @@ void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
template
<
Int
dimension
>
template
<
Int
dimension
>
std
::
vector
<
at
::
Tensor
>
std
::
vector
<
at
::
Tensor
>
Metadata
<
dimension
>::
sparsifyCompare
(
Metadata
<
dimension
>
&
mReference
,
Metadata
<
dimension
>::
sparsifyCompare
(
Metadata
<
dimension
>
&
mReference
,
Metadata
<
dimension
>
&
mSparsified
,
/*long*/
at
::
Tensor
spatialSize
)
{
/*long*/
at
::
Tensor
spatialSize
)
{
auto
p
=
LongTensorToPoint
<
dimension
>
(
spatialSize
);
auto
p
=
LongTensorToPoint
<
dimension
>
(
spatialSize
);
at
::
Tensor
delta
=
torch
::
zeros
({
nActive
[
p
]},
at
::
k
Float
);
at
::
Tensor
gt
=
torch
::
zeros
({
nActive
[
p
]},
at
::
k
Byte
);
at
::
Tensor
ref_map
=
torch
::
empty
({
mReference
.
nActive
[
p
]},
at
::
kLong
);
at
::
Tensor
ref_map
=
torch
::
empty
({
mReference
.
nActive
[
p
]},
at
::
kLong
);
float
*
deltaPtr
=
delta
.
data
<
float
>
();
auto
gtPtr
=
(
signed
char
*
)
gt
.
data_ptr
();
//<signed char>();
// auto gtPtr = gt.data<signed char>();
auto
&
sgsReference
=
mReference
.
grids
[
p
];
auto
&
sgsReference
=
mReference
.
grids
[
p
];
auto
&
sgsFull
=
grids
[
p
];
auto
&
sgsFull
=
grids
[
p
];
auto
&
sgsSparsified
=
mSparsified
.
grids
[
p
];
Int
batchSize
=
sgsFull
.
size
();
Int
batchSize
=
sgsFull
.
size
();
Int
sample
;
Int
sample
;
...
@@ -272,20 +271,16 @@ Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mReference,
...
@@ -272,20 +271,16 @@ Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mReference,
for
(
sample
=
0
;
sample
<
(
Int
)
batchSize
;
++
sample
)
{
for
(
sample
=
0
;
sample
<
(
Int
)
batchSize
;
++
sample
)
{
auto
&
sgReference
=
sgsReference
[
sample
];
auto
&
sgReference
=
sgsReference
[
sample
];
auto
&
sgFull
=
sgsFull
[
sample
];
auto
&
sgFull
=
sgsFull
[
sample
];
auto
&
sgSparsified
=
sgsSparsified
[
sample
];
for
(
auto
const
&
iter
:
sgFull
.
mp
)
{
for
(
auto
const
&
iter
:
sgFull
.
mp
)
{
bool
gt
=
sgReference
.
mp
.
find
(
iter
.
first
)
!=
sgReference
.
mp
.
end
();
bool
gt_
=
sgReference
.
mp
.
find
(
iter
.
first
)
!=
sgReference
.
mp
.
end
();
bool
hot
=
sgSparsified
.
mp
.
find
(
iter
.
first
)
!=
sgSparsified
.
mp
.
end
();
if
(
gt_
)
{
if
(
gt
)
ref_map
[
sgReference
.
mp
[
iter
.
first
]
+
sgReference
.
ctr
]
=
ref_map
[
sgReference
.
mp
[
iter
.
first
]
+
sgReference
.
ctr
]
=
iter
.
second
+
sgFull
.
ctr
;
iter
.
second
+
sgFull
.
ctr
;
if
(
gt
and
not
hot
)
gtPtr
[
iter
.
second
+
sgFull
.
ctr
]
=
+
1
;
deltaPtr
[
iter
.
second
+
sgFull
.
ctr
]
=
-
1
;
}
if
(
hot
and
not
gt
)
deltaPtr
[
iter
.
second
+
sgFull
.
ctr
]
=
+
1
;
}
}
}
}
return
{
delta
,
ref_map
};
return
{
gt
,
ref_map
};
}
}
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
...
...
sparseconvnet/SCN/Metadata/Metadata.h
View file @
54c58b5f
...
@@ -104,8 +104,13 @@ public:
...
@@ -104,8 +104,13 @@ public:
void
appendMetadata
(
Metadata
<
dimension
>
&
mAdd
,
void
appendMetadata
(
Metadata
<
dimension
>
&
mAdd
,
/*long*/
at
::
Tensor
spatialSize
);
/*long*/
at
::
Tensor
spatialSize
);
/* std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference, */
/* Metadata<dimension> &mSparsified,
*/
/* /\*long*\/ at::Tensor spatialSize);
*/
std
::
vector
<
at
::
Tensor
>
sparsifyCompare
(
Metadata
<
dimension
>
&
mReference
,
std
::
vector
<
at
::
Tensor
>
sparsifyCompare
(
Metadata
<
dimension
>
&
mReference
,
Metadata
<
dimension
>
&
mSparsified
,
/*long*/
at
::
Tensor
spatialSize
);
/*long*/
at
::
Tensor
spatialSize
);
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
...
...
sparseconvnet/networkArchitectures.py
View file @
54c58b5f
...
@@ -200,7 +200,7 @@ def SparseResNet(dimension, nInputPlanes, layers):
...
@@ -200,7 +200,7 @@ def SparseResNet(dimension, nInputPlanes, layers):
return
m
return
m
def
UNet
(
dimension
,
reps
,
nPlanes
,
residual_blocks
=
False
,
downsample
=
[
2
,
2
],
leakiness
=
0
):
def
UNet
(
dimension
,
reps
,
nPlanes
,
residual_blocks
=
False
,
downsample
=
[
2
,
2
],
leakiness
=
0
,
n_input_planes
=-
1
):
"""
"""
U-Net style network with VGG or ResNet-style blocks.
U-Net style network with VGG or ResNet-style blocks.
For voxel level prediction:
For voxel level prediction:
...
@@ -218,6 +218,8 @@ def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], lea
...
@@ -218,6 +218,8 @@ def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], lea
x=self.linear(x)
x=self.linear(x)
return x
return x
"""
"""
if
n_input_planes
==-
1
:
n_input_planes
=
nPlanes
[
0
]
def
block
(
m
,
a
,
b
):
def
block
(
m
,
a
,
b
):
if
residual_blocks
:
#ResNet style blocks
if
residual_blocks
:
#ResNet style blocks
m
.
add
(
scn
.
ConcatTable
()
m
.
add
(
scn
.
ConcatTable
()
...
@@ -234,13 +236,9 @@ def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], lea
...
@@ -234,13 +236,9 @@ def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], lea
.
add
(
scn
.
SubmanifoldConvolution
(
dimension
,
a
,
b
,
3
,
False
)))
.
add
(
scn
.
SubmanifoldConvolution
(
dimension
,
a
,
b
,
3
,
False
)))
def
U
(
nPlanes
):
#Recursive function
def
U
(
nPlanes
):
#Recursive function
m
=
scn
.
Sequential
()
m
=
scn
.
Sequential
()
if
len
(
nPlanes
)
==
1
:
for
i
in
range
(
reps
):
for
_
in
range
(
reps
):
block
(
m
,
n_input_planes
if
i
==
0
else
nPlanes
[
0
],
nPlanes
[
0
])
block
(
m
,
nPlanes
[
0
],
nPlanes
[
0
])
if
len
(
nPlanes
)
>
1
:
else
:
m
=
scn
.
Sequential
()
for
_
in
range
(
reps
):
block
(
m
,
nPlanes
[
0
],
nPlanes
[
0
])
m
.
add
(
m
.
add
(
scn
.
ConcatTable
().
add
(
scn
.
ConcatTable
().
add
(
scn
.
Identity
()).
add
(
scn
.
Identity
()).
add
(
...
...
sparseconvnet/sparsify.py
View file @
54c58b5f
...
@@ -4,14 +4,20 @@
...
@@ -4,14 +4,20 @@
# This source code is licensed under the BSD-style license found in the
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# LICENSE file in the root directory of this source tree.
import
sparseconvnet
from
torch.autograd
import
Function
,
Variable
from
torch.autograd
import
Function
,
Variable
from
torch.nn
import
Module
,
Parameter
from
torch.nn
import
Module
import
sparseconvnet
from
.utils
import
*
from
.utils
import
*
from
.sparseConvNetTensor
import
SparseConvNetTensor
from
.sparseConvNetTensor
import
SparseConvNetTensor
from
.metadata
import
Metadata
from
.metadata
import
Metadata
from
.sequential
import
Sequential
from
.activations
import
Sigmoid
from
.networkInNetwork
import
NetworkInNetwork
class
Sparsify
(
Module
):
class
SparsifyFCS
(
Module
):
"""
Sparsify by looking at the first feature channel's sign.
"""
def
__init__
(
self
,
dimension
):
def
__init__
(
self
,
dimension
):
Module
.
__init__
(
self
)
Module
.
__init__
(
self
)
self
.
dimension
=
dimension
self
.
dimension
=
dimension
...
@@ -31,3 +37,65 @@ class Sparsify(Module):
...
@@ -31,3 +37,65 @@ class Sparsify(Module):
return
output
return
output
else
:
else
:
return
input
return
input
class FakeGradHardSigmoidFunction(torch.autograd.Function):
    # Hard threshold in the forward pass (1.0 where x > 0, else 0.0) with a
    # surrogate gradient in the backward pass ("straight-through" style).
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        with torch.no_grad():
            y = (x > 0).float()
        return y
    @staticmethod
    def backward(ctx, grad_output):
        # NOTE(review): this early return passes the gradient through
        # unchanged (identity surrogate) and makes everything below
        # unreachable dead code -- confirm whether the boxcar surrogate
        # below was meant to be active.
        return grad_output
        x, = ctx.saved_tensors
        with torch.no_grad():
            #Either:
            #y=torch.sigmoid(x) #torch.sigmoid(x/5)?
            #df = y*(1-y)
            #Or:
            # Boxcar surrogate: gradient 0.25 inside (-2, 2), zero elsewhere.
            df = ((-2 < x) * (x <+ 2)).float() * 0.25  #
            grad_input = grad_output * df
        return grad_input
class FakeGradHardSigmoid(Module):
    """Module wrapper that applies FakeGradHardSigmoidFunction to the
    features of a SparseConvNetTensor, leaving metadata and spatial size
    untouched."""
    def forward(self, input):
        result = SparseConvNetTensor()
        result.metadata = input.metadata
        result.spatial_size = input.spatial_size
        result.features = FakeGradHardSigmoidFunction.apply(input.features)
        return result
class Sparsify(Module):
    # Learned sparsification: a 1-channel NetworkInNetwork scores every
    # active site; sites scoring above a threshold are kept and the sparse
    # metadata is rebuilt to contain only the survivors.
    def __init__(self, dimension, nIn, activation=None):
        # dimension: spatial dimensionality of the sparse tensors handled.
        # nIn: number of input feature planes fed to the scoring layer.
        # activation: None, 'sigmoid', or 'fakeGradHardSigmoid' -- how the
        # raw score is squashed before thresholding.
        Module.__init__(self)
        self.dimension = dimension
        self.activation = activation
        if activation == 'fakeGradHardSigmoid':
            self.net = Sequential(NetworkInNetwork(nIn, 1, True),
                                  FakeGradHardSigmoid())
        elif activation == 'sigmoid':
            self.net = Sequential(NetworkInNetwork(nIn, 1, True), Sigmoid())
        else:
            # No squashing: the raw linear score is thresholded directly.
            self.net = NetworkInNetwork(nIn, 1, True)
    def forward(self, input):
        if input.features.numel():
            output = SparseConvNetTensor()
            output.spatial_size = input.spatial_size
            output.metadata = Metadata(self.dimension)
            # One score per active site, flattened to a 1-D mask.
            output.mask = self.net(input).features.view(-1)
            # Squashed activations threshold at 0.5; raw scores at 0.
            active = output.mask > (0.5 if self.activation else 0)
            output.features = input.features[active]
            # Moved to CPU before the metadata call -- presumably the C++
            # sparsifyMetadata binding expects CPU tensors; confirm.
            active = active.cpu()
            input.metadata.sparsifyMetadata(output.metadata,
                                            input.spatial_size,
                                            active.byte(),
                                            active.long().cumsum(0))
            #print('Sparsify2 output', output.features.shape, output.mask.features.shape)
            return output
        else:
            # Empty input: nothing to sparsify; pass through with no mask.
            input.mask = None
            return input
sparseconvnet/tables.py
View file @
54c58b5f
...
@@ -18,7 +18,7 @@ class JoinTable(torch.nn.Sequential):
...
@@ -18,7 +18,7 @@ class JoinTable(torch.nn.Sequential):
output
=
SparseConvNetTensor
()
output
=
SparseConvNetTensor
()
output
.
metadata
=
input
[
0
].
metadata
output
.
metadata
=
input
[
0
].
metadata
output
.
spatial_size
=
input
[
0
].
spatial_size
output
.
spatial_size
=
input
[
0
].
spatial_size
output
.
features
=
torch
.
cat
([
i
.
features
for
i
in
input
],
1
)
output
.
features
=
torch
.
cat
([
i
.
features
for
i
in
input
],
1
)
if
input
[
0
].
features
.
numel
()
else
input
[
0
].
features
return
output
return
output
def
input_spatial_size
(
self
,
out_size
):
def
input_spatial_size
(
self
,
out_size
):
...
...
sparseconvnet/utils.py
View file @
54c58b5f
...
@@ -145,6 +145,8 @@ def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
...
@@ -145,6 +145,8 @@ def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
def
is_power2
(
num
):
def
is_power2
(
num
):
return
num
!=
0
and
((
num
&
(
num
-
1
))
==
0
)
return
num
!=
0
and
((
num
&
(
num
-
1
))
==
0
)
def has_only_one_nonzero_digit(num):
    """Return True iff num is positive with one nonzero decimal digit.

    I.e. num is of the form d * 10**k with 1 <= d <= 9 (https://oeis.org/A037124),
    e.g. 7, 20, 300. Uses exact integer arithmetic: the original
    float-based `num / 10**floor(log10(num))` could misclassify very large
    integers and raised ValueError on negative input; negatives (and zero)
    now simply return False.
    """
    if num <= 0:
        return False
    # Strip trailing zero digits, then check a single digit remains.
    while num % 10 == 0:
        num //= 10
    return num < 10
def
checkpoint_save
(
model
,
exp_name
,
name2
,
epoch
,
use_cuda
=
True
):
def
checkpoint_save
(
model
,
exp_name
,
name2
,
epoch
,
use_cuda
=
True
):
f
=
exp_name
+
'-%09d-'
%
epoch
+
name2
+
'.pth'
f
=
exp_name
+
'-%09d-'
%
epoch
+
name2
+
'.pth'
model
.
cpu
()
model
.
cpu
()
...
@@ -157,3 +159,6 @@ def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
...
@@ -157,3 +159,6 @@ def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
if
os
.
path
.
isfile
(
f
):
if
os
.
path
.
isfile
(
f
):
if
not
is_power2
(
epoch
):
if
not
is_power2
(
epoch
):
os
.
remove
(
f
)
os
.
remove
(
f
)
def random_rotation(dimension=3):
    """Return a random dimension x dimension rotation matrix (det == +1).

    Draws a Gaussian matrix and orthogonalizes it with QR. The raw Q
    factor is orthogonal but may be a reflection (det == -1); in that case
    one column's sign is flipped so the result is always a proper rotation
    (the original could return reflections).
    """
    q = torch.qr(torch.randn(dimension, dimension))[0]
    if torch.det(q) < 0:
        q[:, 0] = -q[:, 0]
    return q
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment