Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
f5fc733a
Commit
f5fc733a
authored
Feb 03, 2022
by
Byzantine
Browse files
Removing research/community models
parent
09bc9f54
Changes
326
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
0 additions
and
3236 deletions
+0
-3236
research/cognitive_mapping_and_planning/scripts/__init__.py
research/cognitive_mapping_and_planning/scripts/__init__.py
+0
-0
research/cognitive_mapping_and_planning/scripts/script_distill.py
.../cognitive_mapping_and_planning/scripts/script_distill.py
+0
-177
research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh
...pping_and_planning/scripts/script_download_init_models.sh
+0
-18
research/cognitive_mapping_and_planning/scripts/script_env_vis.py
.../cognitive_mapping_and_planning/scripts/script_env_vis.py
+0
-186
research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py
..._mapping_and_planning/scripts/script_nav_agent_release.py
+0
-253
research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py
...ve_mapping_and_planning/scripts/script_plot_trajectory.py
+0
-339
research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py
...nd_planning/scripts/script_preprocess_annoations_S3DIS.py
+0
-197
research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh
...nd_planning/scripts/script_preprocess_annoations_S3DIS.sh
+0
-24
research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh
...ng_and_planning/scripts/script_preprocess_meshes_S3DIS.sh
+0
-37
research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh
...ing_and_planning/scripts/script_test_pretrained_models.sh
+0
-63
research/cognitive_mapping_and_planning/src/__init__.py
research/cognitive_mapping_and_planning/src/__init__.py
+0
-0
research/cognitive_mapping_and_planning/src/depth_utils.py
research/cognitive_mapping_and_planning/src/depth_utils.py
+0
-96
research/cognitive_mapping_and_planning/src/file_utils.py
research/cognitive_mapping_and_planning/src/file_utils.py
+0
-42
research/cognitive_mapping_and_planning/src/graph_utils.py
research/cognitive_mapping_and_planning/src/graph_utils.py
+0
-552
research/cognitive_mapping_and_planning/src/map_utils.py
research/cognitive_mapping_and_planning/src/map_utils.py
+0
-245
research/cognitive_mapping_and_planning/src/rotation_utils.py
...arch/cognitive_mapping_and_planning/src/rotation_utils.py
+0
-73
research/cognitive_mapping_and_planning/src/utils.py
research/cognitive_mapping_and_planning/src/utils.py
+0
-168
research/cognitive_mapping_and_planning/tfcode/__init__.py
research/cognitive_mapping_and_planning/tfcode/__init__.py
+0
-0
research/cognitive_mapping_and_planning/tfcode/cmp.py
research/cognitive_mapping_and_planning/tfcode/cmp.py
+0
-553
research/cognitive_mapping_and_planning/tfcode/cmp_summary.py
...arch/cognitive_mapping_and_planning/tfcode/cmp_summary.py
+0
-213
No files found.
Too many changes to show.
To preserve performance only
326 of 326+
files are displayed.
Plain diff
Email patch
research/cognitive_mapping_and_planning/scripts/__init__.py
deleted
100644 → 0
View file @
09bc9f54
research/cognitive_mapping_and_planning/scripts/script_distill.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r
""" Script to setup the grid moving agent.
blaze build --define=ION_GFX_OGLES20=1 -c opt --copt=-mavx --config=cuda_clang \
learning/brain/public/tensorflow_std_server{,_gpu} \
experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill.par \
experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill
./blaze-bin/experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill \
--logdir=/cns/iq-d/home/saurabhgupta/output/stanford-distill/local/v0/ \
--config_name 'v0+train' --gfs_user robot-intelligence-gpu
"""
import
sys
,
os
,
numpy
as
np
import
copy
import
argparse
,
pprint
import
time
import
cProfile
import
tensorflow
as
tf
from
tensorflow.contrib
import
slim
from
tensorflow.python.framework
import
ops
from
tensorflow.contrib.framework.python.ops
import
variables
import
logging
from
tensorflow.python.platform
import
gfile
from
tensorflow.python.platform
import
app
from
tensorflow.python.platform
import
flags
from
cfgs
import
config_distill
from
tfcode
import
tf_utils
import
src.utils
as
utils
import
src.file_utils
as
fu
import
tfcode.distillation
as
distill
import
datasets.nav_env
as
nav_env
# Command-line flags for (possibly distributed) distillation training:
# cluster topology plus the run configuration and output directory.
FLAGS = flags.FLAGS

flags.DEFINE_string('master', 'local',
                    'The name of the TensorFlow master to use.')
flags.DEFINE_integer('ps_tasks', 0, 'The number of parameter servers. If the '
                     'value is 0, then the parameters are handled locally by '
                     'the worker.')
flags.DEFINE_integer('task', 0, 'The Task ID. This value is used when training '
                     'with multiple workers to identify each worker.')
flags.DEFINE_integer('num_workers', 1, '')
# Selects a configuration via config_distill.get_args_for_config (see main).
flags.DEFINE_string('config_name', '', '')
# Output directory for checkpoints and summaries.
flags.DEFINE_string('logdir', '', '')
def main(_):
  """Train and/or evaluate the distillation model selected by --config_name."""
  # Resolve the run configuration and overlay the cluster flags onto it.
  args = config_distill.get_args_for_config(FLAGS.config_name)
  args.logdir = FLAGS.logdir
  args.solver.num_workers = FLAGS.num_workers
  args.solver.task = FLAGS.task
  args.solver.ps_tasks = FLAGS.ps_tasks
  args.solver.master = FLAGS.master

  args.buildinger.env_class = nav_env.MeshMapper
  fu.makedirs(args.logdir)
  args.buildinger.logdir = args.logdir
  # Multiplexor that samples buildings / rendering inputs for this worker.
  R = nav_env.get_multiplexor_class(args.buildinger, args.solver.task)

  # Disabled debug path: profiles one sampling round with cProfile and dumps
  # a few rendered images into ./tmp. Flip to True to re-enable locally.
  if False:
    pr = cProfile.Profile()
    pr.enable()
    rng = np.random.RandomState(0)
    for i in range(1):
      b, instances_perturbs = R.sample_building(rng)
      inputs = b.worker(*(instances_perturbs))
      for j in range(inputs['imgs'].shape[0]):
        p = os.path.join('tmp', '{:d}.png'.format(j))
        img = inputs['imgs'][j, 0, :, :, :3] * 1
        img = (img).astype(np.uint8)
        fu.write_image(p, img)
      print(inputs['imgs'].shape)
      inputs = R.pre(inputs)
    pr.disable()
    pr.print_stats(2)

  if args.control.train:
    if not gfile.Exists(args.logdir):
      gfile.MakeDirs(args.logdir)

    m = utils.Foo()
    m.tf_graph = tf.Graph()

    config = tf.ConfigProto()
    config.device_count['GPU'] = 1
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.8

    with m.tf_graph.as_default():
      # Place variables on parameter servers when ps_tasks > 0.
      with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks)):
        m = distill.setup_to_run(m, args, is_training=True,
                                 batch_norm_is_training=True)

        train_step_kwargs = distill.setup_train_step_kwargs_mesh(
            m, R, os.path.join(args.logdir, 'train'),
            rng_seed=args.solver.task,
            is_chief=args.solver.task == 0, iters=1,
            train_display_interval=args.summary.display_interval)

        final_loss = slim.learning.train(
            train_op=m.train_op,
            logdir=args.logdir,
            master=args.solver.master,
            is_chief=args.solver.task == 0,
            number_of_steps=args.solver.max_steps,
            train_step_fn=tf_utils.train_step_custom,
            train_step_kwargs=train_step_kwargs,
            global_step=m.global_step_op,
            init_op=m.init_op,
            init_fn=m.init_fn,
            sync_optimizer=m.sync_optimizer,
            saver=m.saver_op,
            summary_op=None, session_config=config)

  if args.control.test:
    m = utils.Foo()
    m.tf_graph = tf.Graph()
    checkpoint_dir = os.path.join(format(args.logdir))
    with m.tf_graph.as_default():
      m = distill.setup_to_run(
          m, args, is_training=False,
          batch_norm_is_training=args.control.force_batchnorm_is_training_at_test)

      # Evaluation uses a seed offset (task + 1) so it never replays the
      # exact training stream.
      train_step_kwargs = distill.setup_train_step_kwargs_mesh(
          m, R, os.path.join(args.logdir, args.control.test_name),
          rng_seed=args.solver.task + 1,
          is_chief=args.solver.task == 0,
          iters=args.summary.test_iters, train_display_interval=None)

      sv = slim.learning.supervisor.Supervisor(
          graph=ops.get_default_graph(), logdir=None, init_op=m.init_op,
          summary_op=None, summary_writer=None, global_step=None,
          saver=m.saver_op)

      last_checkpoint = None
      while True:
        # Block until the trainer writes a checkpoint newer than the last
        # one we evaluated, then score it.
        last_checkpoint = slim.evaluation.wait_for_new_checkpoint(
            checkpoint_dir, last_checkpoint)
        # Checkpoint basenames look like 'model.ckpt-<iter>'.
        checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])
        start = time.time()
        logging.info('Starting evaluation at %s using checkpoint %s.',
                     time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()),
                     last_checkpoint)

        config = tf.ConfigProto()
        config.device_count['GPU'] = 1
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.8

        with sv.managed_session(args.solver.master, config=config,
                                start_standard_services=False) as sess:
          sess.run(m.init_op)
          sv.saver.restore(sess, last_checkpoint)
          sv.start_queue_runners(sess)
          vals, _ = tf_utils.train_step_custom(
              sess, None, m.global_step_op, train_step_kwargs, mode='val')
          # Stop once the final training checkpoint has been scored.
          if checkpoint_iter >= args.solver.max_steps:
            break
# Run main() via the TensorFlow app harness (parses flags first).
if __name__ == '__main__':
  app.run()
research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh
deleted
100644 → 0
View file @
09bc9f54
# Script to download models to initialize the RGB and D models for training. We
# use ResNet-v2-50 for both modalities.
# Abort on the first failed command so a bad download is not silently extracted.
set -e

mkdir -p data/init_models
cd data/init_models

# RGB Models are initialized by pre-training on ImageNet.
mkdir -p resnet_v2_50
# Note: no spaces around '=' in shell assignments; quote URLs when expanding.
RGB_URL="http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz"
wget "$RGB_URL"
tar -xf resnet_v2_50_2017_04_14.tar.gz -C resnet_v2_50

# Depth models are initialized by distilling the RGB model to D images using
# Cross-Modal Distillation (https://arxiv.org/abs/1507.00448).
mkdir -p distill_rgb_to_d_resnet_v2_50
D_URL="http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/distill_rgb_to_d_resnet_v2_50.tar"
wget "$D_URL"
tar -xf distill_rgb_to_d_resnet_v2_50.tar -C distill_rgb_to_d_resnet_v2_50
research/cognitive_mapping_and_planning/scripts/script_env_vis.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple python function to walk in the environments that we have created.
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_env_vis.py
\
--dataset_name sbpd --building_name area3
"""
import
sys
import
numpy
as
np
import
matplotlib
matplotlib
.
use
(
'TkAgg'
)
from
PIL
import
ImageTk
,
Image
import
Tkinter
as
tk
import
logging
from
tensorflow.python.platform
import
app
from
tensorflow.python.platform
import
flags
import
datasets.nav_env_config
as
nec
import
datasets.nav_env
as
nav_env
import
cv2
from
datasets
import
factory
import
render.swiftshader_renderer
as
renderer
# Short aliases for the renderer and environment classes used below.
SwiftshaderRenderer = renderer.SwiftshaderRenderer
VisualNavigationEnv = nav_env.VisualNavigationEnv

# Flags selecting the dataset/building to visualize and the camera setup.
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset_name', 'sbpd', 'Name of the dataset.')
flags.DEFINE_float('fov', 60., 'Field of view')
flags.DEFINE_integer('image_size', 512, 'Size of the image.')
flags.DEFINE_string('building_name', '', 'Name of the building.')
def get_args():
  """Build the base navigation-task config, overridden by command-line flags.

  Episodes are generated with rejection sampling; the camera and image
  dimensions come from --image_size and --fov.
  """
  cfg = nec.nav_env_base_config()

  task = cfg.task_params
  task.type = 'rng_rejection_sampling_many'
  task.rejection_sampling_M = 2000
  task.min_dist = 10

  side = FLAGS.image_size
  cam = cfg.camera_param
  cam.fov = FLAGS.fov
  cam.height = side
  cam.width = side
  task.img_height = side
  task.img_width = side

  logging.info('navtask: %s', cfg)
  return cfg
def load_building(dataset_name, building_name):
  """Create a renderer, load `building_name` into it, and return the env.

  The building starts hidden (visibility False); callers toggle it on when
  rendering. Returns a VisualNavigationEnv.
  """
  dataset = factory.get_dataset(dataset_name)

  navtask = get_args()
  cp = navtask.camera_param
  rgb_shader, d_shader = renderer.get_shaders(cp.modalities)
  r_obj = SwiftshaderRenderer()
  r_obj.init_display(width=cp.width, height=cp.height,
                     fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far,
                     rgb_shader=rgb_shader, d_shader=d_shader)
  r_obj.clear_scene()
  b = VisualNavigationEnv(robot=navtask.robot, env=navtask.env,
                          task_params=navtask.task_params,
                          building_name=building_name, flip=False, logdir=None,
                          building_loader=dataset, r_obj=r_obj)
  b.load_building_into_scene()
  b.set_building_visibility(False)
  return b
def walk_through(b):
  """Interactive Tk viewer: walk the agent through building `b` with arrows.

  Left/Up/Right take discrete actions 2/3/1 respectively; 'q' closes the
  window. Shows a first-person render plus a colorized overhead map.
  """
  # init agent at a random location in the environment.
  init_env_state = b.reset([np.random.RandomState(0), np.random.RandomState(0)])

  # current_node is shared with the key handlers below via `global`.
  global current_node
  rng = np.random.RandomState(0)
  current_node = rng.choice(b.task.nodes.shape[0])

  root = tk.Tk()
  # First-person rendering of the current node.
  image = b.render_nodes(b.task.nodes[[current_node], :])[0]
  print(image.shape)
  image = image.astype(np.uint8)
  im = Image.fromarray(image)
  im = ImageTk.PhotoImage(im)
  panel = tk.Label(root, image=im)

  # Build a 256-pixel overhead view of the traversible map, colorized with a
  # jet colormap.
  map_size = b.traversible.shape
  sc = np.max(map_size) / 256.
  loc = np.array([[map_size[1] / 2., map_size[0] / 2.]])
  x_axis = np.zeros_like(loc)
  x_axis[:, 1] = sc
  y_axis = np.zeros_like(loc)
  y_axis[:, 0] = -sc
  cum_fs, cum_valid = nav_env.get_map_to_predict(loc, x_axis, y_axis,
                                                 map=b.traversible * 1.,
                                                 map_size=256)
  cum_fs = cum_fs[0]
  cum_fs = cv2.applyColorMap((cum_fs * 255).astype(np.uint8), cv2.COLORMAP_JET)
  im = Image.fromarray(cum_fs)
  im = ImageTk.PhotoImage(im)
  panel_overhead = tk.Label(root, image=im)

  def refresh():
    # Re-render the (updated) current node into the main panel.
    global current_node
    image = b.render_nodes(b.task.nodes[[current_node], :])[0]
    image = image.astype(np.uint8)
    im = Image.fromarray(image)
    im = ImageTk.PhotoImage(im)
    panel.configure(image=im)
    panel.image = im  # keep a reference so Tk does not garbage-collect it

  def left_key(event):
    global current_node
    current_node = b.take_action([current_node], [2], 1)[0][0]
    refresh()

  def up_key(event):
    global current_node
    current_node = b.take_action([current_node], [3], 1)[0][0]
    refresh()

  def right_key(event):
    global current_node
    current_node = b.take_action([current_node], [1], 1)[0][0]
    refresh()

  def quit(event):
    root.destroy()

  # Layout: big first-person panel on the left, overhead map bottom-right.
  panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1,
                      sticky=tk.W + tk.E + tk.N + tk.S)
  panel.bind('<Left>', left_key)
  panel.bind('<Up>', up_key)
  panel.bind('<Right>', right_key)
  panel.bind('q', quit)
  panel.focus_set()
  panel.grid(row=0, column=0, rowspan=5, columnspan=5,
             sticky=tk.W + tk.E + tk.N + tk.S)
  root.mainloop()
def simple_window():
  """Minimal Tkinter demo: shows a red square; arrow keys swap in a green one.

  Pressing 'q' exits the process. Useful for sanity-checking the Tk/PIL
  display pipeline independent of the navigation environments.
  """
  root = tk.Tk()

  # Red square on black, wrapped as a Tk-compatible photo image.
  arr = np.zeros((128, 128, 3), dtype=np.uint8)
  arr[32:96, 32:96, 0] = 255
  im = Image.fromarray(arr)
  im = ImageTk.PhotoImage(im)

  # Green square, same geometry.
  arr = np.zeros((128, 128, 3), dtype=np.uint8)
  arr[32:96, 32:96, 1] = 255
  im2 = Image.fromarray(arr)
  im2 = ImageTk.PhotoImage(im2)

  panel = tk.Label(root, image=im)

  def show_green(event):
    panel.configure(image=im2)
    panel.image = im2  # hold a reference so Tk does not drop the image

  def exit_app(event):
    sys.exit()

  for key in ('<Left>', '<Up>', '<Down>'):
    panel.bind(key, show_green)
  panel.bind('q', exit_app)
  panel.focus_set()
  panel.pack(side="bottom", fill="both", expand="yes")
  root.mainloop()
def main(_):
  """Load the building named by flags and start the interactive viewer."""
  dataset = FLAGS.dataset_name
  building = FLAGS.building_name
  env = load_building(dataset, building)
  walk_through(env)
# Run main() via the TensorFlow app harness (parses flags first).
if __name__ == '__main__':
  app.run()
research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r
""" Script to train and test the grid navigation agent.
Usage:
1. Testing a model.
CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+bench_test \
--logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r
2. Training a model (locally).
CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+train_train \
--logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_
3. Training a model (distributed).
# See https://www.tensorflow.org/deploy/distributed on how to setup distributed
# training.
CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+train_train \
--logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_ \
--ps_tasks $num_ps --master $master_name --task $worker_id
"""
import
sys
,
os
,
numpy
as
np
import
copy
import
argparse
,
pprint
import
time
import
cProfile
import
platform
import
tensorflow
as
tf
from
tensorflow.contrib
import
slim
from
tensorflow.python.framework
import
ops
from
tensorflow.contrib.framework.python.ops
import
variables
import
logging
from
tensorflow.python.platform
import
gfile
from
tensorflow.python.platform
import
app
from
tensorflow.python.platform
import
flags
from
cfgs
import
config_cmp
from
cfgs
import
config_vision_baseline
import
datasets.nav_env
as
nav_env
import
src.file_utils
as
fu
import
src.utils
as
utils
import
tfcode.cmp
as
cmp
from
tfcode
import
tf_utils
from
tfcode
import
vision_baseline_lstm
# Command-line flags for (possibly distributed) training/evaluation of the
# navigation agent.
FLAGS = flags.FLAGS

flags.DEFINE_string('master', '', 'The address of the tensorflow master')
flags.DEFINE_integer('ps_tasks', 0, 'The number of parameter servers. If the '
                     'value is 0, then the parameters are handled locally by '
                     'the worker.')
flags.DEFINE_integer('task', 0, 'The Task ID. This value is used when training '
                     'with multiple workers to identify each worker.')
flags.DEFINE_integer('num_workers', 1, '')
# '<model_type>.<config>' string, resolved by get_args_for_config below.
flags.DEFINE_string('config_name', '', '')
flags.DEFINE_string('logdir', '', '')
flags.DEFINE_integer('solver_seed', 0, '')
# Workers stagger their start by a multiple of this many steps (see _train).
flags.DEFINE_integer('delay_start_iters', 20, '')

logging.basicConfig(level=logging.INFO)
def main(_):
  """Entry point: hand the flag-supplied config name and logdir to the launcher."""
  config_name = FLAGS.config_name
  logdir = FLAGS.logdir
  _launcher(config_name, logdir)
def _launcher(config_name, logdir):
  """Resolve the run configuration and run train and/or test as requested."""
  run_args = _setup_args(config_name, logdir)
  fu.makedirs(run_args.logdir)

  if run_args.control.train:
    _train(run_args)
  if run_args.control.test:
    _test(run_args)
def get_args_for_config(config_name):
  """Return the run configuration named by `config_name`.

  Args:
    config_name: string of the form '<model_type>.<config>' where the
      model-type prefix is 'cmp' (Cognitive Mapping and Planning model) or
      'bl' (LSTM vision baseline).

  Returns:
    The config object with `setup_to_run` / `setup_train_step_kwargs`
    bound to the matching model module.

  Raises:
    ValueError: if the model-type prefix is neither 'cmp' nor 'bl'.
  """
  configs = config_name.split('.')
  model_type = configs[0]  # renamed from `type` to avoid shadowing the builtin
  config_name = '.'.join(configs[1:])
  if model_type == 'cmp':
    args = config_cmp.get_args_for_config(config_name)
    args.setup_to_run = cmp.setup_to_run
    args.setup_train_step_kwargs = cmp.setup_train_step_kwargs
  elif model_type == 'bl':
    args = config_vision_baseline.get_args_for_config(config_name)
    args.setup_to_run = vision_baseline_lstm.setup_to_run
    args.setup_train_step_kwargs = vision_baseline_lstm.setup_train_step_kwargs
  else:
    # logging.fatal (stdlib logging.critical) does NOT terminate the process;
    # previously `args` was unbound here and `return args` raised
    # UnboundLocalError. Raise an explicit error instead.
    logging.fatal('Unknown type: {:s}'.format(model_type))
    raise ValueError('Unknown type: {:s}'.format(model_type))
  return args
def _setup_args(config_name, logdir):
  """Fetch the named config and overlay the cluster/seed flags onto it."""
  cfg = get_args_for_config(config_name)

  solver = cfg.solver  # alias; attribute writes land on the same object
  solver.num_workers = FLAGS.num_workers
  solver.task = FLAGS.task
  solver.ps_tasks = FLAGS.ps_tasks
  solver.master = FLAGS.master
  solver.seed = FLAGS.solver_seed

  cfg.logdir = logdir
  cfg.navtask.logdir = None
  return cfg
def _train(args):
  """Run (possibly distributed, DAgger-style online-sampling) training."""
  container_name = ""
  # Thunk so the environment multiplexer is constructed inside the right
  # context (called once below when building train_step_kwargs).
  R = lambda: nav_env.get_multiplexer_class(args.navtask, args.solver.task)
  m = utils.Foo()
  m.tf_graph = tf.Graph()
  config = tf.ConfigProto()
  config.device_count['GPU'] = 1

  with m.tf_graph.as_default():
    # Place variables on parameter servers when ps_tasks > 0.
    with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks,
                                                  merge_devices=True)):
      with tf.container(container_name):
        m = args.setup_to_run(m, args, is_training=True,
                              batch_norm_is_training=True,
                              summary_mode='train')

        train_step_kwargs = args.setup_train_step_kwargs(
            m, R(), os.path.join(args.logdir, 'train'),
            rng_seed=args.solver.task,
            is_chief=args.solver.task == 0,
            num_steps=args.navtask.task_params.num_steps *
            args.navtask.task_params.num_goals,
            iters=1,
            train_display_interval=args.summary.display_interval,
            dagger_sample_bn_false=args.arch.dagger_sample_bn_false)

        # Stagger worker start-up quadratically in the task id to avoid a
        # thundering herd against the parameter servers.
        delay_start = (args.solver.task *
                       (args.solver.task + 1)) / 2 * FLAGS.delay_start_iters
        logging.error('delaying start for task %d by %d steps.',
                      args.solver.task, delay_start)

        additional_args = {}
        final_loss = slim.learning.train(
            train_op=m.train_op,
            logdir=args.logdir,
            master=args.solver.master,
            is_chief=args.solver.task == 0,
            number_of_steps=args.solver.max_steps,
            train_step_fn=tf_utils.train_step_custom_online_sampling,
            train_step_kwargs=train_step_kwargs,
            global_step=m.global_step_op,
            init_op=m.init_op,
            init_fn=m.init_fn,
            sync_optimizer=m.sync_optimizer,
            saver=m.saver_op,
            startup_delay_steps=delay_start,
            summary_op=None, session_config=config, **additional_args)
def _test(args):
  """Evaluation loop: wait for each new training checkpoint and score it."""
  # Evaluation always runs against a local in-process master.
  args.solver.master = ''
  container_name = ""
  checkpoint_dir = os.path.join(format(args.logdir))
  logging.error('Checkpoint_dir: %s', args.logdir)

  config = tf.ConfigProto()
  config.device_count['GPU'] = 1

  m = utils.Foo()
  m.tf_graph = tf.Graph()

  rng_data_seed = 0
  rng_action_seed = 0
  R = lambda: nav_env.get_multiplexer_class(args.navtask, rng_data_seed)
  with m.tf_graph.as_default():
    with tf.container(container_name):
      m = args.setup_to_run(
          m, args, is_training=False,
          batch_norm_is_training=args.control.force_batchnorm_is_training_at_test,
          summary_mode=args.control.test_mode)
      train_step_kwargs = args.setup_train_step_kwargs(
          m, R(), os.path.join(args.logdir, args.control.test_name),
          rng_seed=rng_data_seed, is_chief=True,
          num_steps=args.navtask.task_params.num_steps *
          args.navtask.task_params.num_goals,
          iters=args.summary.test_iters, train_display_interval=None,
          dagger_sample_bn_false=args.arch.dagger_sample_bn_false)

      saver = slim.learning.tf_saver.Saver(variables.get_variables_to_restore())

      sv = slim.learning.supervisor.Supervisor(
          graph=ops.get_default_graph(), logdir=None, init_op=m.init_op,
          summary_op=None, summary_writer=None, global_step=None,
          saver=m.saver_op)

      last_checkpoint = None
      reported = False
      while True:
        # Poll (10s period, 60s timeout per call) until a checkpoint newer
        # than `last_checkpoint` shows up.
        last_checkpoint_ = None
        while last_checkpoint_ is None:
          last_checkpoint_ = slim.evaluation.wait_for_new_checkpoint(
              checkpoint_dir, last_checkpoint, seconds_to_sleep=10, timeout=60)
        if last_checkpoint_ is None:
          break
        last_checkpoint = last_checkpoint_

        # Checkpoint basenames look like 'model.ckpt-<iter>'.
        checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])
        logging.info('Starting evaluation at %s using checkpoint %s.',
                     time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()),
                     last_checkpoint)

        # Optionally skip intermediate checkpoints and evaluate only the
        # final one (when only_eval_when_done is set).
        if (args.control.only_eval_when_done == False or
            checkpoint_iter >= args.solver.max_steps):
          start = time.time()
          logging.info('Starting evaluation at %s using checkpoint %s.',
                       time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()),
                       last_checkpoint)

          with sv.managed_session(args.solver.master, config=config,
                                  start_standard_services=False) as sess:
            sess.run(m.init_op)
            sv.saver.restore(sess, last_checkpoint)
            sv.start_queue_runners(sess)
            if args.control.reset_rng_seed:
              # Reset data/action RNGs so every checkpoint is scored on the
              # same episode stream.
              train_step_kwargs['rng_data'] = [
                  np.random.RandomState(rng_data_seed),
                  np.random.RandomState(rng_data_seed)]
              train_step_kwargs['rng_action'] = np.random.RandomState(
                  rng_action_seed)
            vals, _ = tf_utils.train_step_custom_online_sampling(
                sess, None, m.global_step_op, train_step_kwargs,
                mode=args.control.test_mode)
            should_stop = False
            if checkpoint_iter >= args.solver.max_steps:
              should_stop = True
            if should_stop:
              break
# Run main() via the TensorFlow app harness (parses flags first).
if __name__ == '__main__':
  app.run()
research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r
"""
Code for plotting trajectories in the top view, and also plot first person views
from saved trajectories. Does not run the network but only loads the mesh data
to plot the view points.
CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \
--first_person --num_steps 40 \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \
--imset test --alsologtostderr --base_dir output --out_dir vis
"""
import
os
,
sys
,
numpy
as
np
,
copy
import
matplotlib
matplotlib
.
use
(
"Agg"
)
import
matplotlib.pyplot
as
plt
import
matplotlib.animation
as
animation
from
matplotlib.gridspec
import
GridSpec
import
tensorflow
as
tf
from
tensorflow.contrib
import
slim
import
cv2
import
logging
from
tensorflow.python.platform
import
gfile
from
tensorflow.python.platform
import
app
from
tensorflow.python.platform
import
flags
from
datasets
import
nav_env
import
scripts.script_nav_agent_release
as
sna
import
src.file_utils
as
fu
from
src
import
graph_utils
from
src
import
utils
# Flags selecting what to visualize and where to read/write data.
FLAGS = flags.FLAGS

flags.DEFINE_string('out_dir', 'vis', 'Directory where to store the output')
flags.DEFINE_string('type', '', 'Optional type.')
flags.DEFINE_bool('first_person', False, 'Visualize the first person view.')
flags.DEFINE_bool('top_view', False,
                  'Visualize the trajectory in the top view.')
flags.DEFINE_integer('num_steps', 40, 'Number of steps to run the model for.')
flags.DEFINE_string('imset', 'test', '')
flags.DEFINE_string('base_dir', 'output', 'Cache directory.')
def
_get_suffix_str
():
return
''
def _load_trajectory():
  """Load the saved per-step agent locations for the latest checkpoint.

  Waits for a checkpoint under <base_dir>/<type>/<config_name>, then loads
  'bench_on_<imset>/all_locs_at_t_<iter>.pkl' from that directory.
  """
  base_dir = FLAGS.base_dir
  config_name = FLAGS.config_name + _get_suffix_str()

  dir_name = os.path.join(base_dir, FLAGS.type, config_name)
  logging.info('Waiting for snapshot in directory %s.', dir_name)
  last_checkpoint = slim.evaluation.wait_for_new_checkpoint(dir_name, None)
  # Checkpoint basenames look like 'model.ckpt-<iter>'.
  checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])

  # Load the distances.
  a = utils.load_variables(os.path.join(
      dir_name, 'bench_on_' + FLAGS.imset,
      'all_locs_at_t_{:d}.pkl'.format(checkpoint_iter)))
  return a
def _compute_hardness():
  """Compare heuristic vs. ground-truth start-to-goal distances.

  Samples 250 environments from the benchmark config and, for each episode
  in the batch, records the heuristic distance estimate alongside the true
  distance-to-goal.

  Returns:
    h_dists: np.array of heuristic start->goal distances.
    gt_dists: np.array of the corresponding ground-truth distances.
    orig_maps: 2D map slice of a freshly sampled environment
      (common_data['orig_maps'][0, 0, :, :, 0]).
  """
  # Load the stanford data to compute the hardness.
  if FLAGS.type == '':
    args = sna.get_args_for_config(FLAGS.config_name + '+bench_' + FLAGS.imset)
  else:
    args = sna.get_args_for_config(FLAGS.type + '.' + FLAGS.config_name +
                                   '+bench_' + FLAGS.imset)

  args.navtask.logdir = None
  # Construct the multiplexer directly (the original wrapped this in an
  # immediately-invoked lambda).
  R = nav_env.get_multiplexer_class(args.navtask, 0)

  rng_data = [np.random.RandomState(0), np.random.RandomState(0)]

  h_dists = []
  gt_dists = []
  for i in range(250):
    # Sample a room.
    e = R.sample_env(rng_data)
    nodes = e.task.nodes

    # Initialize the agent (sets up e.episode; the return value is unused).
    e.reset(rng_data)

    for j in range(args.navtask.task_params.batch_size):
      start_node_id = e.episode.start_node_ids[j]
      end_node_id = e.episode.goal_node_ids[0][j]

      h_dist = graph_utils.heuristic_fn_vec(
          nodes[[start_node_id], :], nodes[[end_node_id], :],
          n_ori=args.navtask.task_params.n_ori,
          step_size=args.navtask.task_params.step_size)[0][0]
      gt_dist = e.episode.dist_to_goal[0][j][start_node_id]
      h_dists.append(h_dist)
      gt_dists.append(gt_dist)

  h_dists = np.array(h_dists)
  gt_dists = np.array(gt_dists)

  e = R.sample_env([np.random.RandomState(0), np.random.RandomState(0)])
  # Renamed from `input`, which shadowed the builtin.
  common_data = e.get_common_data()
  orig_maps = common_data['orig_maps'][0, 0, :, :, 0]
  return h_dists, gt_dists, orig_maps
def plot_trajectory_first_person(dt, orig_maps, out_dir):
  """Renders first-person videos (one .mp4 per selected episode).

  For each episode index in a hard-coded list, renders RGB frames along the
  executed trajectory (with sub-step interpolation), overlays distance-to-goal
  and the agent's action, and shows the trajectory in a top-view inset.

  Args:
    dt: dict of benchmark outputs; keys used here: 'all_node_ids',
      'all_d_at_t', 'all_locs', 'all_goal_locs'.
    orig_maps: 2D occupancy map image used for the top-view inset.
    out_dir: base output directory; files go to
      out_dir/<config_name+suffix>/<imset>/vis_XXXX.mp4.
  """
  out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
                         FLAGS.imset)
  fu.makedirs(out_dir)

  # Load the model so that we can render.
  plt.set_cmap('gray')
  samples_per_action = 8; wait_at_action = 0;

  # Requires mencoder to be installed on the system.
  Writer = animation.writers['mencoder']
  writer = Writer(fps=3*(samples_per_action+wait_at_action),
                  metadata=dict(artist='anonymous'), bitrate=1800)

  args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset)
  args.navtask.logdir = None
  # Re-render at high resolution (512x512) with RGB modality only.
  navtask_ = copy.deepcopy(args.navtask)
  navtask_.camera_param.modalities = ['rgb']
  navtask_.task_params.modalities = ['rgb']
  sz = 512
  navtask_.camera_param.height = sz
  navtask_.camera_param.width = sz
  navtask_.task_params.img_height = sz
  navtask_.task_params.img_width = sz
  R = lambda: nav_env.get_multiplexer_class(navtask_, 0)
  R = R()
  b = R.buildings[0]

  # Per-step interpolation fractions: optional hold frames followed by
  # evenly spaced fractions of one action.
  f = [0 for _ in range(wait_at_action)] + \
      [float(_)/samples_per_action for _ in range(samples_per_action)];

  # Generate things for it to render.
  inds_to_do = []
  inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390]

  for i in inds_to_do:
    fig = plt.figure(figsize=(10,8))
    gs = GridSpec(3,4)
    gs.update(wspace=0.05, hspace=0.05, left=0.0, top=0.97, right=1.0, bottom=0.)
    ax = fig.add_subplot(gs[:,:-1])
    ax1 = fig.add_subplot(gs[0,-1])
    ax2 = fig.add_subplot(gs[1,-1])
    ax3 = fig.add_subplot(gs[2,-1])
    axes = [ax, ax1, ax2, ax3]
    # ax = fig.add_subplot(gs[:,:])
    # axes = [ax]
    for ax in axes:
      ax.set_axis_off()

    node_ids = dt['all_node_ids'][i, :, 0]*1
    # Prune so that last node is not repeated more than 3 times?
    if np.all(node_ids[-4:] == node_ids[-1]):
      while node_ids[-4] == node_ids[-1]:
        node_ids = node_ids[:-1]
    num_steps = np.minimum(FLAGS.num_steps, len(node_ids))

    xyt = b.to_actual_xyt_vec(b.task.nodes[node_ids])
    # Per-step (dx, dy, dtheta); theta deltas wrapped into {-1, 0, 1, 2}
    # (a raw delta of 3 is the same rotation as -1).
    xyt_diff = xyt[1:,:] - xyt[:-1:,:]
    xyt_diff[:,2] = np.mod(xyt_diff[:,2], 4)
    ind = np.where(xyt_diff[:,2] == 3)[0]
    xyt_diff[ind, 2] = -1

    # Tile each step delta by the interpolation fractions in f, appending a
    # zero column expected by render_nodes' perturb argument.
    xyt_diff = np.expand_dims(xyt_diff, axis=1)
    to_cat = [xyt_diff*_ for _ in f]
    perturbs_all = np.concatenate(to_cat, axis=1)
    perturbs_all = np.concatenate([perturbs_all, np.zeros_like(perturbs_all[:,:,:1])],
                                  axis=2)

    node_ids_all = np.expand_dims(node_ids, axis=1)*1
    node_ids_all = np.concatenate([node_ids_all for _ in f], axis=1)
    node_ids_all = np.reshape(node_ids_all[:-1,:], -1)
    perturbs_all = np.reshape(perturbs_all, [-1, 4])
    imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all)

    # Get action at each node.
    actions = []
    _, action_to_nodes = b.get_feasible_actions(node_ids)
    for j in range(num_steps-1):
      action_to_node = action_to_nodes[j]
      # Invert the action->node map to look up which action reaches the
      # next node on the trajectory.
      node_to_action = dict(zip(action_to_node.values(), action_to_node.keys()))
      actions.append(node_to_action[node_ids[j+1]])

    def init_fn():
      return fig,
    gt_dist_to_goal = []

    # Render trajectories.
    def worker(j):
      # Plot the image.
      # NOTE(review): integer division intended here (Python 2 semantics);
      # under Python 3 this yields a float index — confirm before porting.
      step_number = j/(samples_per_action + wait_at_action)
      img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off();
      img = img.astype(np.uint8); ax.imshow(img);
      tt = ax.set_title(
          "First Person View\n" +
          "Top corners show diagnostics (distance, agents' action) not input to agent.",
          fontsize=12)
      plt.setp(tt, color='white')

      # Distance to goal.
      t = 'Dist to Goal:\n{:2d} steps'.format(int(dt['all_d_at_t'][i, step_number]))
      t = ax.text(0.01, 0.99, t,
          horizontalalignment='left',
          verticalalignment='top',
          fontsize=20, color='red',
          transform=ax.transAxes, alpha=1.0)
      t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))

      # Action to take (rendered as a LaTeX arrow glyph).
      action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', r'$\Uparrow$ ']
      t = ax.text(0.99, 0.99, action_latex[actions[step_number]],
          horizontalalignment='right',
          verticalalignment='top',
          fontsize=40, color='green',
          transform=ax.transAxes, alpha=1.0)
      t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))

      # Plot the map top view.
      ax = axes[-1]
      if j == 0:
        # Plot the map (static background, drawn only on the first frame).
        locs = dt['all_locs'][i,:num_steps,:]
        goal_loc = dt['all_goal_locs'][i,:,:]
        xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
        xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
        xy1 = (xymax+xymin)/2. - 0.7*np.maximum(np.max(xymax-xymin), 24)
        xy2 = (xymax+xymin)/2. + 0.7*np.maximum(np.max(xymax-xymin), 24)

        ax.set_axis_on()
        ax.patch.set_facecolor((0.333, 0.333, 0.333))
        ax.set_xticks([]); ax.set_yticks([]);
        ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0)
        ax.plot(goal_loc[:,0], goal_loc[:,1], 'g*', markersize=12)

        locs = dt['all_locs'][i,:1,:]
        ax.plot(locs[:,0], locs[:,1], 'b.', markersize=12)

        ax.set_xlim([xy1[0], xy2[0]])
        ax.set_ylim([xy1[1], xy2[1]])

      # Current agent location, one red dot per frame.
      locs = dt['all_locs'][i,step_number,:]
      locs = np.expand_dims(locs, axis=0)
      ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4)
      tt = ax.set_title('Trajectory in topview', fontsize=14)
      plt.setp(tt, color='white')
      return fig,

    line_ani = animation.FuncAnimation(fig, worker,
                                       (num_steps-1)*(wait_at_action+samples_per_action),
                                       interval=500, blit=True, init_func=init_fn)
    # Write locally first, then copy to the (possibly remote) destination.
    tmp_file_name = 'tmp.mp4'
    line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'})
    out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i))
    print(out_file_name)

    if fu.exists(out_file_name):
      gfile.Remove(out_file_name)
    gfile.Copy(tmp_file_name, out_file_name)
    gfile.Remove(tmp_file_name)
    plt.close(fig)
def plot_trajectory(dt, hardness, orig_maps, out_dir):
  """Saves raw trajectory data and one top-view PNG per episode.

  Args:
    dt: dict of benchmark outputs ('all_locs', 'all_goal_locs', ...);
      'hardness' is inserted into it before saving.
    hardness: per-episode hardness scores, stored alongside the data.
    orig_maps: 2D occupancy map image used as the background.
    out_dir: base output directory; files go to
      out_dir/<config_name+suffix>/<imset>/.
  """
  out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
                         FLAGS.imset)
  fu.makedirs(out_dir)
  out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')
  dt['hardness'] = hardness
  utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True)

  #Plot trajectories onto the maps
  plt.set_cmap('gray')
  # NOTE(review): assumes at least 4000 episodes are present in dt — an
  # IndexError terminates the loop otherwise; confirm against the caller.
  for i in range(4000):
    goal_loc = dt['all_goal_locs'][i, :, :]
    locs = np.concatenate((dt['all_locs'][i,:,:],
                           dt['all_locs'][i,:,:]), axis=0)
    # Square viewport centered on the trajectory, at least 24 units wide.
    xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
    xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
    xy1 = (xymax+xymin)/2. - 1.*np.maximum(np.max(xymax-xymin), 24)
    xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24)

    fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6))
    ax.set_axis_on()
    ax.patch.set_facecolor((0.333, 0.333, 0.333))
    ax.set_xticks([])
    ax.set_yticks([])

    # Drop consecutive duplicate locations, keeping the first occurrence.
    all_locs = dt['all_locs'][i,:,:]*1
    uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1
    uniq = np.sort(uniq).tolist()
    uniq.insert(0, 0)
    uniq = np.array(uniq)
    all_locs = all_locs[uniq, :]

    # Start (blue dot), goal (green star), path (red line + gradient dots).
    ax.plot(dt['all_locs'][i, 0, 0], dt['all_locs'][i, 0, 1],
            'b.', markersize=24)
    ax.plot(dt['all_goal_locs'][i, 0, 0], dt['all_goal_locs'][i, 0, 1],
            'g*', markersize=19)
    ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)
    ax.scatter(all_locs[:,0], all_locs[:,1],
               c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
               cmap='Reds', s=30, linewidth=0)
    ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')
    ax.set_xlim([xy1[0], xy2[0]])
    ax.set_ylim([xy1[1], xy2[1]])

    file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))
    print(file_name)
    with fu.fopen(file_name, 'w') as f:
      plt.savefig(f)
    plt.close(fig)
def main(_):
  """Entry point: loads trajectories, computes hardness, plots as requested.

  Hardness is 1 - heuristic_distance/geodesic_distance, i.e. episodes whose
  straight-line estimate underestimates the true path length are harder.
  """
  a = _load_trajectory()
  h_dists, gt_dists, orig_maps = _compute_hardness()
  hardness = 1. - h_dists*1. / gt_dists

  if FLAGS.top_view:
    plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir)

  if FLAGS.first_person:
    plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir)
if __name__ == '__main__':
  # app.run() parses flags and dispatches to main().
  app.run()
research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import
os
import
glob
import
numpy
as
np
import
logging
import
cPickle
from
datasets
import
nav_env
from
datasets
import
factory
from
src
import
utils
from
src
import
map_utils
as
mu
logging.basicConfig(level=logging.INFO)

# Root of the raw Stanford Building Parser (S3DIS) download.
DATA_DIR = 'data/stanford_building_parser_dataset_raw/'

# Short aliases for frequently used utilities.
mkdir_if_missing = utils.mkdir_if_missing
save_variables = utils.save_variables
def _get_semantic_maps(building_name, transform, map_, flip, cats):
  """Accumulates per-category top-view occupancy maps for one building.

  Args:
    building_name: raw S3DIS area directory name (e.g. 'Area_1').
    transform: if truthy, applies the Area_5b axis swap/offset to vertices.
    map_: map descriptor with .size (width, height) used to size the output.
    flip: if truthy, mirrors vertices along y.
    cats: list of category names to produce maps for.

  Returns:
    List of 2D arrays (one per category), point counts projected to the map.
  """
  rooms = get_room_in_building(building_name)
  maps = []
  for cat in cats:
    maps.append(np.zeros((map_.size[1], map_.size[0])))

  for r in rooms:
    room = load_room(building_name, r, category_list=cats)
    classes = room['class_id']

    for i, cat in enumerate(cats):
      c_ind = cats.index(cat)  # same as i since cats entries are unique
      ind = [_ for _, c in enumerate(classes) if c == c_ind]
      if len(ind) > 0:
        vs = [room['vertexs'][x]*1 for x in ind]
        vs = np.concatenate(vs, axis=0)
        if transform:
          # Area_5b annotations are in a rotated/offset frame: swap x and y,
          # negate the new x, then shift (offsets in meters).
          vs = np.array([vs[:,1], vs[:,0], vs[:,2]]).T
          vs[:,0] = -vs[:,0]
          vs[:,1] += 4.20
          vs[:,0] += 6.20
        vs = vs*100.  # meters -> centimeters
        if flip:
          vs[:,1] = -vs[:,1]
        maps[i] = maps[i] + \
          mu._project_to_map(map_, vs, ignore_points_outside_map=True)
  return maps
def
_map_building_name
(
building_name
):
b
=
int
(
building_name
.
split
(
'_'
)[
0
][
4
])
out_name
=
'Area_{:d}'
.
format
(
b
)
if
b
==
5
:
if
int
(
building_name
.
split
(
'_'
)[
0
][
5
])
==
1
:
transform
=
True
else
:
transform
=
False
else
:
transform
=
False
return
out_name
,
transform
def get_categories():
  """Returns the fixed, ordered list of S3DIS semantic category names."""
  return ['beam', 'board', 'bookcase', 'ceiling', 'chair', 'clutter',
          'column', 'door', 'floor', 'sofa', 'table', 'wall', 'window']
def _write_map_files(b_in, b_out, transform):
  """Computes and caches semantic class maps for a building (both flips).

  Args:
    b_in: raw S3DIS area name to read annotations from (e.g. 'Area_5').
    b_out: processed building name used to build the map (e.g. 'area5b').
    transform: whether to apply the Area_5b coordinate transform.

  Side effects:
    Writes one pickle per flip under DATA_DIR/processing/class-maps/.
  """
  cats = get_categories()

  # Map-construction and robot parameters (units follow nav_env conventions).
  env = utils.Foo(padding=10, resolution=5, num_point_threshold=2,
                  valid_min=-10, valid_max=200, n_samples_per_face=200)
  robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120,
                    camera_elevation_degree=-15)

  building_loader = factory.get_dataset('sbpd')
  for flip in [False, True]:
    b = nav_env.Building(b_out, robot, env, flip=flip,
                         building_loader=building_loader)
    logging.info("building_in: %s, building_out: %s, transform: %d", b_in,
                 b_out, transform)
    maps = _get_semantic_maps(b_in, transform, b.map, flip, cats)
    maps = np.transpose(np.array(maps), axes=[1,2,0])  # HxWxC

    # Load file from the cache.
    # File name encodes map geometry so stale caches are never reused.
    file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'
    file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1],
                                 b.map.origin[0], b.map.origin[1],
                                 b.map.resolution, flip)
    out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name)
    logging.info('Writing semantic maps to %s.', out_file)
    save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True)
def
_transform_area5b
(
room_dimension
):
for
a
in
room_dimension
.
keys
():
r
=
room_dimension
[
a
]
*
1
r
[[
0
,
1
,
3
,
4
]]
=
r
[[
1
,
0
,
4
,
3
]]
r
[[
0
,
3
]]
=
-
r
[[
3
,
0
]]
r
[[
1
,
4
]]
+=
4.20
r
[[
0
,
3
]]
+=
6.20
room_dimension
[
a
]
=
r
return
room_dimension
def collect_room(building_name, room_name):
  """Loads all annotated object point clouds for one room.

  Reads every .txt file in the room's Annotations directory; each file holds
  one object as rows of [x y z r g b].

  Returns:
    Dict with parallel lists: 'vertexs' (Nx3 float arrays), 'colors'
    (Nx3 uint8 arrays), and 'names' (file basenames without extension).
  """
  room_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name,
                          room_name, 'Annotations')
  files = glob.glob1(room_dir, '*.txt')
  files = sorted(files, key=lambda s: s.lower())
  vertexs = []; colors = [];
  for f in files:
    file_name = os.path.join(room_dir, f)
    logging.info(' %s', file_name)
    a = np.loadtxt(file_name)
    vertex = a[:,:3]*1.
    color = a[:,3:]*1
    color = color.astype(np.uint8)
    vertexs.append(vertex)
    colors.append(color)
  files = [f.split('.')[0] for f in files]
  out = {'vertexs': vertexs, 'colors': colors, 'names': files}
  return out
def load_room(building_name, room_name, category_list=None):
  """Loads a room's point clouds and optionally assigns class ids.

  Args:
    building_name: raw S3DIS area name.
    room_name: room directory name within the area.
    category_list: optional list of category names; when given, each object
      gets a class id = index of its name prefix in the list, or
      len(category_list) for unknown categories.

  Returns:
    The dict from collect_room augmented with building/room names,
    'instance_id', and (if category_list) 'class_id' / 'category_list'.
  """
  room = collect_room(building_name, room_name)
  room['building_name'] = building_name
  room['room_name'] = room_name
  instance_id = range(len(room['names']))
  room['instance_id'] = instance_id
  if category_list is not None:
    # Object names look like '<category>_<instance>'; the prefix is the class.
    name = [r.split('_')[0] for r in room['names']]
    class_id = []
    for n in name:
      if n in category_list:
        class_id.append(category_list.index(n))
      else:
        class_id.append(len(category_list))
    room['class_id'] = class_id
    room['category_list'] = category_list
  return room
def get_room_in_building(building_name):
  """Returns the case-insensitively sorted room directory names of an area."""
  building_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name)
  entries = os.listdir(building_dir)
  rooms = [e for e in entries
           if os.path.isdir(os.path.join(building_dir, e))]
  return sorted(rooms, key=lambda s: s.lower())
def write_room_dimensions(b_in, b_out, transform):
  """Computes per-room axis-aligned bounding boxes and pickles them.

  Args:
    b_in: raw S3DIS area name to read from.
    b_out: output building name (pickle is named <b_out>.pkl).
    transform: when == 1, applies the Area_5b coordinate transform.

  Side effects:
    Writes DATA_DIR/processing/room-dimension/<b_out>.pkl containing a dict
    room name -> [min_xyz, max_xyz] (6-vector).
  """
  rooms = get_room_in_building(b_in)
  room_dimension = {}
  for r in rooms:
    room = load_room(b_in, r, category_list=None)
    vertex = np.concatenate(room['vertexs'], axis=0)
    room_dimension[r] = np.concatenate((np.min(vertex, axis=0),
                                        np.max(vertex, axis=0)), axis=0)

  if transform == 1:
    room_dimension = _transform_area5b(room_dimension)

  out_file = os.path.join(DATA_DIR, 'processing', 'room-dimension', b_out+'.pkl')
  save_variables(out_file, [room_dimension], ['room_dimension'], overwrite=True)
def write_room_dimensions_all(I):
  """Writes room-dimension pickles for the areas selected by index list I.

  Index 5 ('area5b') is the half of Area_5 that needs the coordinate
  transform; both area5a and area5b read from the raw 'Area_5' directory.
  """
  mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'room-dimension'))
  bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6']
  bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6']
  transforms = [0, 0, 0, 0, 0, 1, 0]

  for i in I:
    write_room_dimensions(bs_in[i], bs_out[i], transforms[i])
def write_class_maps_all(I):
  """Writes semantic class-map pickles for the areas selected by index list I.

  Mirrors write_room_dimensions_all: index 5 ('area5b') gets the Area_5b
  coordinate transform.
  """
  mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'class-maps'))
  bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6']
  bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6']
  transforms = [0, 0, 0, 0, 0, 1, 0]

  for i in I:
    _write_map_files(bs_in[i], bs_out[i], transforms[i])
if __name__ == '__main__':
  # Index 1 (Area_2) is deliberately skipped in both passes.
  write_room_dimensions_all([0, 2, 3, 4, 5, 6])
  write_class_maps_all([0, 2, 3, 4, 5, 6])
research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
cd
data/stanford_building_parser_dataset_raw
unzip Stanford3dDataset_v1.2.zip
cd
../../
PYOPENGL_PLATFORM
=
egl
PYTHONPATH
=
'.'
python scripts/script_preprocess_annoations_S3DIS.py
mv
data/stanford_building_parser_dataset_raw/processing/room-dimension data/stanford_building_parser_dataset/.
mv
data/stanford_building_parser_dataset_raw/processing/class-maps data/stanford_building_parser_dataset/.
echo
"You may now delete data/stanford_building_parser_dataset_raw if needed."
research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Extract the textured meshes from the raw S3DIS tarballs and strip OBJ
# group/object statements so the renderer sees a single chunk per area.
mkdir -p data/stanford_building_parser_dataset
mkdir -p data/stanford_building_parser_dataset/mesh
cd data/stanford_building_parser_dataset_raw
# Untar the files and extract the meshes.
for t in "1" "3" "4" "5a" "5b" "6"; do
  # Only the rgb_textures subtree is needed from each archive.
  tar -xf area_"$t"_noXYZ.tar area_$t/3d/rgb_textures
  mv area_$t/3d/rgb_textures ../stanford_building_parser_dataset/mesh/area$t
  rmdir area_$t/3d
  rmdir area_$t
done
cd ../../

# Preprocess meshes to remove the group and chunk information.
cd data/stanford_building_parser_dataset/
for t in "1" "3" "4" "5a" "5b" "6"; do
  obj_name=`ls mesh/area$t/*.obj`
  # Keep a backup, then drop 'g' (group) and 'o' (object) lines in place.
  cp $obj_name "$obj_name".bck
  cat $obj_name.bck | grep -v '^g' | grep -v '^o' > $obj_name
done
cd ../../
research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Test CMP models.
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
cmp.lmap_Msc.clip5.sbpd_d_r2r+bench_test
\
--logdir
output/cmp.lmap_Msc.clip5.sbpd_d_r2r
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
cmp.lmap_Msc.clip5.sbpd_rgb_r2r+bench_test
\
--logdir
output/cmp.lmap_Msc.clip5.sbpd_rgb_r2r
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
cmp.lmap_Msc.clip5.sbpd_d_ST+bench_test
\
--logdir
output/cmp.lmap_Msc.clip5.sbpd_d_ST
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
cmp.lmap_Msc.clip5.sbpd_rgb_ST+bench_test
\
--logdir
output/cmp.lmap_Msc.clip5.sbpd_rgb_ST
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80+bench_test
\
--logdir
output/cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80
# Test LSTM baseline models.
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
bl.v2.noclip.sbpd_d_r2r+bench_test
\
--logdir
output/bl.v2.noclip.sbpd_d_r2r
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
bl.v2.noclip.sbpd_rgb_r2r+bench_test
\
--logdir
output/bl.v2.noclip.sbpd_rgb_r2r
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
bl.v2.noclip.sbpd_d_ST+bench_test
\
--logdir
output/bl.v2.noclip.sbpd_d_ST
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
bl.v2.noclip.sbpd_rgb_ST+bench_test
\
--logdir
output/bl.v2.noclip.sbpd_rgb_ST
CUDA_VISIBLE_DEVICES
=
0
LD_LIBRARY_PATH
=
/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH
=
'.'
PYOPENGL_PLATFORM
=
egl
\
python scripts/script_nav_agent_release.py
--config_name
bl.v2.noclip.sbpd_d_r2r_h0_64_80+bench_test
\
--logdir
output/bl.v2.noclip.sbpd_d_r2r_h0_64_80
# Visualize test trajectories in top view.
# CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \
# python scripts/script_plot_trajectory.py \
# --first_person --num_steps 40 \
# --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \
# --imset test --alsologtostderr
research/cognitive_mapping_and_planning/src/__init__.py
deleted
100644 → 0
View file @
09bc9f54
research/cognitive_mapping_and_planning/src/depth_utils.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for processing depth images.
"""
import
numpy
as
np
import
src.rotation_utils
as
ru
import
src.utils
as
utils
def get_camera_matrix(width, height, fov):
  """Returns a camera matrix from image size and fov.

  Args:
    width: image width in pixels.
    height: image height in pixels.
    fov: horizontal field of view in degrees.

  Returns:
    utils.Foo with principal point (xc, zc) at the image center and focal
    length f in pixels derived from the horizontal fov.
  """
  xc = (width-1.) / 2.
  zc = (height-1.) / 2.
  f = (width / 2.) / np.tan(np.deg2rad(fov / 2.))
  camera_matrix = utils.Foo(xc=xc, zc=zc, f=f)
  return camera_matrix
def get_point_cloud_from_z(Y, camera_matrix):
  """Projects the depth image Y into a 3D point cloud.
  Inputs:
    Y is ...xHxW
    camera_matrix
  Outputs:
    X is positive going right
    Y is positive into the image
    Z is positive up in the image
    XYZ is ...xHxWx3
  """
  # Pixel-coordinate grids; rows are flipped so Z increases upward.
  cols = np.arange(Y.shape[-1])
  rows = np.arange(Y.shape[-2] - 1, -1, -1)
  x, z = np.meshgrid(cols, rows)

  # Prepend singleton axes so the grids broadcast over any batch dims of Y.
  n_batch_dims = Y.ndim - 2
  x = x.reshape((1,) * n_batch_dims + x.shape)
  z = z.reshape((1,) * n_batch_dims + z.shape)

  # Pinhole back-projection: offset from principal point, scaled by depth.
  X = (x - camera_matrix.xc) * Y / camera_matrix.f
  Z = (z - camera_matrix.zc) * Y / camera_matrix.f

  # X and Z already have Y's full shape after broadcasting.
  return np.stack((X, Y * np.ones_like(X), Z), axis=-1)
def make_geocentric(XYZ, sensor_height, camera_elevation_degree):
  """Transforms the point cloud into geocentric coordinate frame.
  Input:
    XYZ : ...x3
    sensor_height : height of the sensor
    camera_elevation_degree : camera elevation to rectify.
  Output:
    XYZ : ...x3
  """
  # Rotate about the x axis to undo the camera's elevation angle.
  R = ru.get_r_matrix([1., 0., 0.], angle=np.deg2rad(camera_elevation_degree))
  XYZ = np.matmul(XYZ.reshape(-1, 3), R.T).reshape(XYZ.shape)
  # Shift up by the sensor mounting height so z is relative to the floor.
  XYZ[..., 2] = XYZ[..., 2] + sensor_height
  return XYZ
def bin_points(XYZ_cms, map_size, z_bins, xy_resolution):
  """Bins points into xy-z bins
  XYZ_cms is ... x H x W x3
  Outputs is ... x map_size x map_size x (len(z_bins)+1)
  """
  sh = XYZ_cms.shape
  # Flatten all leading batch dims so we can loop over individual HxWx3 clouds.
  XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]])
  n_z_bins = len(z_bins)+1
  map_center = (map_size-1.)/2.
  counts = []
  isvalids = []
  for XYZ_cm in XYZ_cms:
    isnotnan = np.logical_not(np.isnan(XYZ_cm[:,:,0]))
    # Quantize x/y to grid cells (map origin at the center) and z via z_bins.
    X_bin = np.round(XYZ_cm[:,:,0] / xy_resolution + map_center).astype(np.int32)
    Y_bin = np.round(XYZ_cm[:,:,1] / xy_resolution + map_center).astype(np.int32)
    Z_bin = np.digitize(XYZ_cm[:,:,2], bins=z_bins).astype(np.int32)

    # A point is kept only if all three bin indices land inside the map and
    # the point is not NaN.
    isvalid = np.array([X_bin >= 0, X_bin < map_size, Y_bin >= 0,
                        Y_bin < map_size, Z_bin >= 0, Z_bin < n_z_bins,
                        isnotnan])
    isvalid = np.all(isvalid, axis=0)

    ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
    # Invalid points are redirected to bin 0 but contribute weight 0 below.
    ind[np.logical_not(isvalid)] = 0
    count = np.bincount(ind.ravel(), isvalid.ravel().astype(np.int32),
                        minlength=map_size*map_size*n_z_bins)
    # Weighted bincount returns floats even for integer weights.
    count = np.reshape(count, [map_size, map_size, n_z_bins])
    counts.append(count)
    isvalids.append(isvalid)
  # Restore the original batch dims on both outputs.
  counts = np.array(counts).reshape(list(sh[:-3]) + [map_size, map_size,
                                                     n_z_bins])
  isvalids = np.array(isvalids).reshape(list(sh[:-3]) + [sh[-3], sh[-2], 1])
  return counts, isvalids
research/cognitive_mapping_and_planning/src/file_utils.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating files.
"""
import
os
import
numpy
as
np
import
PIL
from
tensorflow.python.platform
import
gfile
import
cv2
# Thin aliases around TensorFlow's gfile so the rest of the code base stays
# agnostic to the underlying filesystem implementation.
exists = lambda path: gfile.Exists(path)
fopen = lambda path, mode: gfile.Open(path, mode)
makedirs = lambda path: gfile.MakeDirs(path)
listdir = lambda path: gfile.ListDir(path)
copyfile = lambda a, b, o: gfile.Copy(a, b, o)
def write_image(image_path, rgb):
  """Encodes an RGB image and writes it via gfile.

  The encoder is chosen from the file extension. Channels are reversed
  because cv2.imencode expects BGR order.
  """
  ext = os.path.splitext(image_path)[1]
  with gfile.GFile(image_path, 'w') as f:
    img_str = cv2.imencode(ext, rgb[:,:,::-1])[1].tostring()
    f.write(img_str)
def read_image(image_path, type='rgb'):
  """Reads an image via gfile into a numpy array.

  Args:
    image_path: path readable by fopen/gfile.
    type: if 'rgb', only the first 3 channels are returned (drops alpha).
      NOTE: parameter name shadows the builtin `type`; kept for API
      compatibility.
  """
  with fopen(image_path, 'r') as f:
    I = PIL.Image.open(f)
    II = np.array(I)
    if type == 'rgb':
      II = II[:,:,:3]
  return II
research/cognitive_mapping_and_planning/src/graph_utils.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function to manipulate graphs for computing distances.
"""
import
skimage.morphology
import
numpy
as
np
import
networkx
as
nx
import
itertools
import
logging
from
datasets.nav_env
import
get_path_ids
import
graph_tool
as
gt
import
graph_tool.topology
import
graph_tool.generation
import
src.utils
as
utils
# Compute shortest path from all nodes to or from all source nodes
def get_distance_node_list(gtG, source_nodes, direction, weights=None):
  """Shortest distance between every node and the nearest source node.

  Adds a virtual vertex connected to all source_nodes with zero-cost edges,
  then runs one single-source shortest-path query from it.

  Args:
    gtG: graph_tool graph.
    source_nodes: iterable of source vertex ids.
    direction: 'to' for distance towards the sources (reversed edges),
      'from' for distance away from them.
    weights: optional edge-property-map name for edge costs; unweighted
      (hop count) when None.

  Returns:
    1D numpy array of distances, one entry per original vertex.
  """
  gtG_ = gt.Graph(gtG)  # copy so the virtual vertex doesn't pollute gtG
  v = gtG_.add_vertex()

  if weights is not None:
    weights = gtG_.edge_properties[weights]

  for s in source_nodes:
    e = gtG_.add_edge(s, int(v))
    if weights is not None:
      weights[e] = 0.

  if direction == 'to':
    dist = gt.topology.shortest_distance(
        gt.GraphView(gtG_, reversed=True), source=gtG_.vertex(int(v)),
        target=None, weights=weights)
  elif direction == 'from':
    dist = gt.topology.shortest_distance(
        gt.GraphView(gtG_, reversed=False), source=gtG_.vertex(int(v)),
        target=None, weights=weights)
  dist = np.array(dist.get_array())
  dist = dist[:-1]  # drop the virtual vertex's own entry
  if weights is None:
    # Unweighted distances include the hop through the virtual vertex.
    dist = dist-1
  return dist
# Functions for semantically labelling nodes in the traversal graph.
def generate_lattice(sz_x, sz_y):
  """Generates a lattice with sz_x vertices along x and sz_y vertices along y
  direction Each of these vertices is step_size distance apart. Origin is at
  (0,0).

  Returns:
    g: graph_tool lattice graph with 4-connectivity.
    nodes: (sz_x*sz_y) x 2 array of integer (x, y) coordinates, ordered to
      match the lattice's vertex numbering.
  """
  g = gt.generation.lattice([sz_x, sz_y])
  x, y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
  x = np.reshape(x, [-1,1]); y = np.reshape(y, [-1,1]);
  nodes = np.concatenate((x,y), axis=1)
  return g, nodes
def add_diagonal_edges(g, nodes, sz_x, sz_y, edge_len):
  """Adds diagonal edges (8-connectivity) of weight edge_len to a lattice.

  Candidate pairs are offset by sz_x+1 (down-right) or sz_x-1 (down-left) in
  vertex index; a pair is kept only if the coordinates truly differ by
  (±1, ±1), which filters out wrap-around across row boundaries. Requires
  the 'wts' edge property to exist on g.
  """
  offset = [sz_x+1, sz_x-1]
  for o in offset:
    s = np.arange(nodes.shape[0]-o-1)
    t = s + o
    ind = np.all(np.abs(nodes[s,:] - nodes[t,:]) == np.array([[1,1]]), axis=1)
    s = s[ind][:,np.newaxis]
    t = t[ind][:,np.newaxis]
    st = np.concatenate((s,t), axis=1)
    for i in range(st.shape[0]):
      e = g.add_edge(st[i,0], st[i,1], add_missing=False)
      g.ep['wts'][e] = edge_len
def convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1.,
                                 oo_cost=1., connectivity=4):
  """Builds a weighted lattice graph from a binary traversibility map.

  Args:
    traversible: 2D boolean array, True where the cell is free space.
    ff_cost: cost multiplier for free -> free edges.
    fo_cost: cost multiplier for free <-> obstacle edges.
    oo_cost: cost multiplier for obstacle -> obstacle edges.
    connectivity: 4 or 8 neighbourhood.

  Returns:
    g: graph_tool graph with an edge property 'wts' holding the costs
      (diagonal edges additionally carry their sqrt(2) base length).
    nodes: per-vertex integer (x, y) coordinates.
  """
  assert(connectivity == 4 or connectivity == 8)

  sz_x = traversible.shape[1]
  sz_y = traversible.shape[0]
  g, nodes = generate_lattice(sz_x, sz_y)

  # Assign costs.
  edge_wts = g.new_edge_property('float')
  g.edge_properties['wts'] = edge_wts
  wts = np.ones(g.num_edges(), dtype=np.float32)
  edge_wts.get_array()[:] = wts

  if connectivity == 8:
    add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.))

  # Classify each edge by the traversibility of its two endpoints.
  se = np.array([[int(e.source()), int(e.target())] for e in g.edges()])
  s_xy = nodes[se[:,0]]
  t_xy = nodes[se[:,1]]
  s_t = np.ravel_multi_index((s_xy[:,1], s_xy[:,0]), traversible.shape)
  t_t = np.ravel_multi_index((t_xy[:,1], t_xy[:,0]), traversible.shape)
  s_t = traversible.ravel()[s_t]
  t_t = traversible.ravel()[t_t]

  wts = np.zeros(g.num_edges(), dtype=np.float32)
  wts[np.logical_and(s_t == True, t_t == True)] = ff_cost
  wts[np.logical_and(s_t == False, t_t == False)] = oo_cost
  wts[np.logical_xor(s_t, t_t)] = fo_cost

  # Multiply into existing weights so diagonal base lengths are preserved.
  edge_wts = g.edge_properties['wts']
  for i, e in enumerate(g.edges()):
    edge_wts[e] = edge_wts[e] * wts[i]
  # d = edge_wts.get_array()*1.
  # edge_wts.get_array()[:] = d*wts
  return g, nodes
def label_nodes_with_class(nodes_xyt, class_maps, pix):
  """Labels graph nodes with semantic classes from per-class maps.

  Each class map is dilated by a disk of radius pix; every pixel is then
  assigned the argmax class (or -1 where all dilated maps are zero), and each
  node reads its label from the pixel at its rounded (x, y) location.

  Args:
    nodes_xyt: N x >=2 array; columns 0 and 1 are x and y pixel coordinates.
    class_maps: H x W x n_classes array of per-class support.
    pix: dilation radius in pixels.

  Returns:
    class_maps_one_hot: H x W x n_classes boolean one-hot class map.
    node_class_label_one_hot: N x n_classes boolean one-hot node labels.
  """
  # Assign each pixel to a node.
  selem = skimage.morphology.disk(pix)
  class_maps_ = class_maps*1.
  for i in range(class_maps.shape[2]):
    class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem)
  class_maps__ = np.argmax(class_maps_, axis=2)
  # Pixels with no support in any dilated map stay unlabelled (-1).
  class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:,[0]]).astype(np.int32)
  y = np.round(nodes_xyt[:,[1]]).astype(np.int32)
  ind = np.ravel_multi_index((y,x), class_maps__.shape)
  node_class_label = class_maps__.ravel()[ind][:,0]

  # Convert to one hot versions.
  # Fix: np.bool (a deprecated alias of the builtin) was removed in
  # NumPy 1.24; the builtin bool produces the identical dtype.
  class_maps_one_hot = np.zeros(class_maps.shape, dtype=bool)
  node_class_label_one_hot = np.zeros((node_class_label.shape[0],
                                       class_maps.shape[2]), dtype=bool)
  for i in range(class_maps.shape[2]):
    class_maps_one_hot[:,:,i] = class_maps__ == i
    node_class_label_one_hot[:,i] = node_class_label == i
  return class_maps_one_hot, node_class_label_one_hot
def label_nodes_with_class_geodesic(nodes_xyt, class_maps, pix, traversible,
                                    ff_cost=1., fo_cost=1., oo_cost=1.,
                                    connectivity=4):
  """Labels nodes in nodes_xyt with class labels using geodesic distance as
  defined by traversible from class_maps.
  Inputs:
    nodes_xyt
    class_maps: counts for each class.
    pix: distance threshold to consider close enough to target.
    traversible: binary map of whether traversible or not.
  Output:
    labels: For each node in nodes_xyt returns a label of the class or -1 is
    unlabelled.
  """
  # Build a lattice graph over the traversibility map; edge weights depend on
  # whether the endpoints are free/occupied (ff/fo/oo costs).
  g, nodes = convert_traversible_to_graph(traversible, ff_cost=ff_cost,
                                          fo_cost=fo_cost, oo_cost=oo_cost,
                                          connectivity=connectivity)

  class_dist = np.zeros_like(class_maps * 1.)
  n_classes = class_maps.shape[2]
  if False:
    # Dead branch kept intentionally (disabled alternative that assigned each
    # pixel to a class by per-pixel point counts instead of geodesics).
    # Assign each pixel to a class based on number of points.
    selem = skimage.morphology.disk(pix)
    class_maps_ = class_maps * 1.
    class_maps__ = np.argmax(class_maps_, axis=2)
    class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # Label nodes with classes: for each class, geodesic distance from all
  # pixels containing that class to every pixel in the map.
  for i in range(n_classes):
    class_node_ids = np.where(class_maps[:, :, i].ravel() > 0)[0]
    # get_distance_node_list is defined elsewhere in this module; 'to' with
    # weights='wts' presumably computes weighted distance toward these nodes
    # -- confirm against its definition.
    dist_i = get_distance_node_list(g, class_node_ids, 'to', weights='wts')
    class_dist[:, :, i] = np.reshape(dist_i, class_dist[:, :, i].shape)

  # Pixel is labelled with a class if within `pix` geodesic distance of it.
  class_map_geodesic = (class_dist <= pix)
  class_map_geodesic = np.reshape(class_map_geodesic, [-1, n_classes])

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:, [0]]).astype(np.int32)
  y = np.round(nodes_xyt[:, [1]]).astype(np.int32)
  ind = np.ravel_multi_index((y, x), class_dist[:, :, 0].shape)
  node_class_label = class_map_geodesic[ind[:, 0], :]
  # Re-derive the (H, W, n_classes) boolean map for the return value (the
  # flattened version above was only needed for node indexing).
  class_map_geodesic = class_dist <= pix
  return class_map_geodesic, node_class_label
def
_get_next_nodes_undirected
(
n
,
sc
,
n_ori
):
nodes_to_add
=
[]
nodes_to_validate
=
[]
(
p
,
q
,
r
)
=
n
nodes_to_add
.
append
((
n
,
(
p
,
q
,
r
),
0
))
if
n_ori
==
4
:
for
_
in
[
1
,
2
,
3
,
4
]:
if
_
==
1
:
v
=
(
p
-
sc
,
q
,
r
)
elif
_
==
2
:
v
=
(
p
+
sc
,
q
,
r
)
elif
_
==
3
:
v
=
(
p
,
q
-
sc
,
r
)
elif
_
==
4
:
v
=
(
p
,
q
+
sc
,
r
)
nodes_to_validate
.
append
((
n
,
v
,
_
))
return
nodes_to_add
,
nodes_to_validate
def
_get_next_nodes
(
n
,
sc
,
n_ori
):
nodes_to_add
=
[]
nodes_to_validate
=
[]
(
p
,
q
,
r
)
=
n
for
r_
,
a_
in
zip
([
-
1
,
0
,
1
],
[
1
,
0
,
2
]):
nodes_to_add
.
append
((
n
,
(
p
,
q
,
np
.
mod
(
r
+
r_
,
n_ori
)),
a_
))
if
n_ori
==
6
:
if
r
==
0
:
v
=
(
p
+
sc
,
q
,
r
)
elif
r
==
1
:
v
=
(
p
+
sc
,
q
+
sc
,
r
)
elif
r
==
2
:
v
=
(
p
,
q
+
sc
,
r
)
elif
r
==
3
:
v
=
(
p
-
sc
,
q
,
r
)
elif
r
==
4
:
v
=
(
p
-
sc
,
q
-
sc
,
r
)
elif
r
==
5
:
v
=
(
p
,
q
-
sc
,
r
)
elif
n_ori
==
4
:
if
r
==
0
:
v
=
(
p
+
sc
,
q
,
r
)
elif
r
==
1
:
v
=
(
p
,
q
+
sc
,
r
)
elif
r
==
2
:
v
=
(
p
-
sc
,
q
,
r
)
elif
r
==
3
:
v
=
(
p
,
q
-
sc
,
r
)
nodes_to_validate
.
append
((
n
,
v
,
3
))
return
nodes_to_add
,
nodes_to_validate
def generate_graph(valid_fn_vec=None, sc=1., n_ori=6,
                   starting_location=(0, 0, 0), vis=False, directed=True):
  """Flood-fills a lattice graph of poses reachable from starting_location.

  Args:
    valid_fn_vec: vectorized predicate over a list of candidate (x, y, theta)
      nodes; None accepts every candidate.
    sc: lattice translation step size.
    n_ori: number of discrete orientations (4 or 6).
    starting_location: seed (x, y, theta) node.
    vis: unused here; kept for interface compatibility.
    directed: build an nx.DiGraph (rotate/step actions) vs. an nx.Graph.

  Returns:
    The populated networkx graph; edges carry an integer 'action' attribute.
  """
  timer = utils.Timer()
  timer.tic()
  if directed:
    G = nx.DiGraph(directed=True)
  else:
    G = nx.Graph()
  G.add_node(starting_location)
  # Materialize: under networkx >= 2, G.nodes() is a live view and mutating G
  # while iterating it raises RuntimeError.
  new_nodes = list(G.nodes())
  while len(new_nodes) != 0:
    nodes_to_add = []
    nodes_to_validate = []
    for n in new_nodes:
      if directed:
        na, nv = _get_next_nodes(n, sc, n_ori)
      else:
        na, nv = _get_next_nodes_undirected(n, sc, n_ori)
      nodes_to_add = nodes_to_add + na
      if valid_fn_vec is not None:
        nodes_to_validate = nodes_to_validate + nv
      else:
        # Bug fix: the original assigned to a misspelled `node_to_add`,
        # silently discarding all candidate moves when no predicate is given.
        nodes_to_add = nodes_to_add + nv

    # Validate candidate nodes. Bug fix: this was previously run even when
    # valid_fn_vec was None, raising TypeError on the first iteration.
    if valid_fn_vec is not None:
      vs = [_[1] for _ in nodes_to_validate]
      valids = valid_fn_vec(vs)
      for nva, valid in zip(nodes_to_validate, valids):
        if valid:
          nodes_to_add.append(nva)

    new_nodes = []
    for n, v, a in nodes_to_add:
      if not G.has_node(v):
        new_nodes.append(v)
      G.add_edge(n, v, action=a)
  timer.toc(average=True, log_at=1, log_str='src.graph_utils.generate_graph')
  return (G)
def vis_G(G, ax, vertex_color='r', edge_color='b', r=None):
  """Plots graph G on matplotlib axes `ax`.

  Nodes are (x, y, theta) tuples. Edges are drawn in `edge_color` (when `r`
  is given, only edges whose source orientation equals `r`); nodes are drawn
  as dots in `vertex_color`. Pass None for either color to skip that layer.
  """
  if edge_color is not None:
    for e in G.edges():
      # Bug fix: zip() returns an iterator on Python 3, so the original
      # XYT[-3] indexing raised TypeError; materialize with list().
      xyt = list(zip(*e))
      x = xyt[-3]
      y = xyt[-2]
      t = xyt[-1]
      if r is None or t[0] == r:
        ax.plot(x, y, edge_color)
  if vertex_color is not None:
    xyt = list(zip(*G.nodes()))
    x = xyt[-3]
    y = xyt[-2]
    t = xyt[-1]
    ax.plot(x, y, vertex_color + '.')
def convert_to_graph_tool(G):
  """Converts a networkx graph into a graph-tool graph.

  Returns:
    gtG: graph-tool graph with an int edge property 'action'.
    nodes_array: node tuples as a numpy array, row-indexed by gt vertex id.
    nodes_to_id: dict mapping node tuple -> graph-tool vertex id.
  """
  timer = utils.Timer()
  timer.tic()
  gtG = gt.Graph(directed=G.is_directed())
  gtG.ep['action'] = gtG.new_edge_property('int')

  nodes_list = G.nodes()
  nodes_array = np.array(nodes_list)

  nodes_id = np.zeros((nodes_array.shape[0],), dtype=np.int64)

  for i in range(nodes_array.shape[0]):
    v = gtG.add_vertex()
    nodes_id[i] = int(v)

  # Bug fix: itertools.izip is Python 2 only; the builtin zip is its Python 3
  # equivalent and was equally correct on Python 2.
  d = dict(zip(nodes_list, nodes_id))

  # NOTE(review): edges_iter is the networkx 1.x API; under networkx >= 2
  # this would be G.edges(data=True) -- confirm the pinned networkx version.
  for src, dst, data in G.edges_iter(data=True):
    e = gtG.add_edge(d[src], d[dst])
    gtG.ep['action'][e] = data['action']
  nodes_to_id = d
  timer.toc(average=True, log_at=1,
            log_str='src.graph_utils.convert_to_graph_tool')
  return gtG, nodes_array, nodes_to_id
def
_rejection_sampling
(
rng
,
sampling_d
,
target_d
,
bins
,
hardness
,
M
):
bin_ind
=
np
.
digitize
(
hardness
,
bins
)
-
1
i
=
0
ratio
=
target_d
[
bin_ind
]
/
(
M
*
sampling_d
[
bin_ind
])
while
i
<
ratio
.
size
and
rng
.
rand
()
>
ratio
[
i
]:
i
=
i
+
1
return
i
def heuristic_fn_vec(n1, n2, n_ori, step_size):
  """Vectorized heuristic distance from each pose in n1 to the single pose n2.

  Args:
    n1: (N, 3) array of (x, y, theta) poses.
    n2: (1, 3) array with the single reference pose.
    n_ori: number of discrete orientations (4: grid, 6: hex-style lattice).
    step_size: translation step; dx/dy are expressed in steps.

  Returns:
    (N, 1) float array: translation steps plus shortest rotation steps.
  """
  dx = (n1[:, 0] - n2[0, 0]) / step_size
  dy = (n1[:, 1] - n2[0, 1]) / step_size
  dt = n1[:, 2] - n2[0, 2]
  dt = np.mod(dt, n_ori)
  dt = np.minimum(dt, n_ori - dt)  # shortest rotation in either direction

  if n_ori == 6:
    # Bug fix: the original `if dx*dy > 0:` raises ValueError for more than
    # one pose (ambiguous truth value of an array). np.where applies the same
    # per-element rule across the whole batch.
    d = np.where(dx * dy > 0,
                 np.maximum(np.abs(dx), np.abs(dy)),
                 np.abs(dy - dx))
  elif n_ori == 4:
    d = np.abs(dx) + np.abs(dy)  # Manhattan distance on the grid

  return (d + dt).reshape((-1, 1))
def get_hardness_distribution(gtG, max_dist, min_dist, rng, trials, bins, nodes,
                              n_ori, step_size):
  """Estimates the distribution of path 'hardness' over random goals.

  Hardness of a node is 1 - heuristic_distance / true_graph_distance, i.e.
  how much longer the true path is than the heuristic lower bound. Samples
  `trials` random goal nodes, collects hardness for all nodes whose true
  distance lies in [min_dist, max_dist], and returns a normalized histogram
  over `bins`.
  """
  heuristic_fn = lambda node_ids, node_id: \
    heuristic_fn_vec(nodes[node_ids, :], nodes[[node_id], :], n_ori, step_size)
  num_nodes = gtG.num_vertices()
  gt_dists = []; h_dists = [];
  for i in range(trials):
    end_node_id = rng.choice(num_nodes)
    # Distances TO the goal: reversed=True flips edge directions so the
    # single-source search measures travel toward end_node_id.
    gt_dist = gt.topology.shortest_distance(gt.GraphView(gtG, reversed=True),
                                            source=gtG.vertex(end_node_id),
                                            target=None, max_dist=max_dist)
    gt_dist = np.array(gt_dist.get_array())
    # Keep only nodes within the requested distance band; unreachable nodes
    # get a large sentinel distance from graph-tool and are filtered here.
    ind = np.where(np.logical_and(gt_dist <= max_dist,
                                  gt_dist >= min_dist))[0]
    gt_dist = gt_dist[ind]
    h_dist = heuristic_fn(ind, end_node_id)[:, 0]
    gt_dists.append(gt_dist)
    h_dists.append(h_dist)
  gt_dists = np.concatenate(gt_dists)
  h_dists = np.concatenate(h_dists)
  hardness = 1. - h_dists * 1. / gt_dists
  hist, _ = np.histogram(hardness, bins)
  hist = hist.astype(np.float64)
  hist = hist / np.sum(hist)
  return hist
def rng_next_goal_rejection_sampling(start_node_ids, batch_size, gtG, rng,
                                     max_dist, min_dist, max_dist_to_compute,
                                     sampling_d, target_d, nodes, n_ori,
                                     step_size, bins, M):
  """Samples (start, goal) pairs whose hardness follows a target distribution.

  For each of `batch_size` episodes: pick (or take) a start node, compute true
  distances from it, restrict to nodes in [min_dist, max_dist], and use
  rejection sampling on hardness (1 - heuristic/true distance) to pick a goal
  matching `target_d` given proposal `sampling_d` and bound `M`. Then returns
  the distance field and predecessor map toward each sampled goal.

  Returns:
    (start_node_ids_, end_node_ids, dists, pred_maps, paths, hardnesss,
     gt_dists); paths is always None here.
  """
  sample_start_nodes = start_node_ids is None
  dists = []; pred_maps = []; end_node_ids = []; start_node_ids_ = [];
  hardnesss = []; gt_dists = [];
  num_nodes = gtG.num_vertices()
  for i in range(batch_size):
    done = False
    # Retry until rejection sampling accepts a goal for this episode.
    while not done:
      if sample_start_nodes:
        start_node_id = rng.choice(num_nodes)
      else:
        start_node_id = start_node_ids[i]

      # Forward distances from the start node, capped at max_dist.
      gt_dist = gt.topology.shortest_distance(
          gt.GraphView(gtG, reversed=False), source=start_node_id,
          target=None, max_dist=max_dist)
      gt_dist = np.array(gt_dist.get_array())
      ind = np.where(np.logical_and(gt_dist <= max_dist,
                                    gt_dist >= min_dist))[0]
      # Shuffle candidates so rejection sampling scans them in random order.
      ind = rng.permutation(ind)
      gt_dist = gt_dist[ind] * 1.
      h_dist = heuristic_fn_vec(nodes[ind, :], nodes[[start_node_id], :],
                                n_ori, step_size)[:, 0]
      hardness = 1. - h_dist / gt_dist
      sampled_ind = _rejection_sampling(rng, sampling_d, target_d, bins,
                                        hardness, M)
      # _rejection_sampling returns ind.size when every candidate is
      # rejected; only accept in-range indices.
      if sampled_ind < ind.size:
        end_node_id = ind[sampled_ind]
        hardness = hardness[sampled_ind]
        gt_dist = gt_dist[sampled_ind]
        done = True

    # Compute distance from end node to all nodes, to return.
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=end_node_id, target=None,
        max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())

    hardnesss.append(hardness); dists.append(dist); pred_maps.append(pred_map);
    start_node_ids_.append(start_node_id); end_node_ids.append(end_node_id);
    gt_dists.append(gt_dist);
    # NOTE(review): paths is assigned inside the loop; with batch_size == 0
    # the final return would raise NameError -- presumably never called that
    # way, confirm with callers.
    paths = None
  return start_node_ids_, end_node_ids, dists, pred_maps, paths, hardnesss, \
      gt_dists
def rng_next_goal(start_node_ids, batch_size, gtG, rng, max_dist,
                  max_dist_to_compute, node_room_ids, nodes=None,
                  compute_path=False, dists_from_start_node=None):
  """Picks a goal near each start node, preferring a different, labelled room.

  For each start node: compute (or reuse) its distance field, then sample a
  goal within max_dist, preferring (1) a different room that is not a hallway,
  then (2) any non-hallway node, then (3) any nearby node. Returns the
  distance field / predecessor map toward each goal and, optionally, paths.

  Returns:
    (start_node_ids, end_node_ids, dists, pred_maps, paths).
  """
  # Compute the distance field from the starting location, and then pick a
  # destination in another room if possible otherwise anywhere outside this
  # room.
  dists = []; pred_maps = []; paths = []; end_node_ids = [];
  for i in range(batch_size):
    room_id = node_room_ids[start_node_ids[i]]
    # Compute distances. Bug fix: identity comparison `is None` replaces
    # `== None` (PEP 8; `==` can misbehave for array-like operands).
    if dists_from_start_node is None:
      dist, pred_map = gt.topology.shortest_distance(
          gt.GraphView(gtG, reversed=False),
          source=gtG.vertex(start_node_ids[i]), target=None,
          max_dist=max_dist_to_compute, pred_map=True)
      dist = np.array(dist.get_array())
    else:
      dist = dists_from_start_node[i]

    # Randomly sample nodes which are within max_dist.
    near_ids = dist <= max_dist
    near_ids = near_ids[:, np.newaxis]

    # Check to see if there is a non-negative node which is close enough.
    non_same_room_ids = node_room_ids != room_id
    non_hallway_ids = node_room_ids != -1  # -1 marks hallway / unlabelled
    good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids,
                                                        non_hallway_ids))
    good2_ids = np.logical_and(near_ids, non_hallway_ids)
    good3_ids = near_ids
    if np.any(good1_ids):
      end_node_id = rng.choice(np.where(good1_ids)[0])
    elif np.any(good2_ids):
      end_node_id = rng.choice(np.where(good2_ids)[0])
    elif np.any(good3_ids):
      end_node_id = rng.choice(np.where(good3_ids)[0])
    else:
      logging.error('Did not find any good nodes.')

    # Compute distance to this new goal for doing distance queries.
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())

    dists.append(dist)
    pred_maps.append(pred_map)
    end_node_ids.append(end_node_id)

    path = None
    if compute_path:
      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
    paths.append(path)
  return start_node_ids, end_node_ids, dists, pred_maps, paths
def rng_room_to_room(batch_size, gtG, rng, max_dist, max_dist_to_compute,
                     node_room_ids, nodes=None, compute_path=False):
  """Samples room-to-room (start, goal) pairs.

  For each episode: pick a random labelled room and a goal node inside it,
  compute distances toward that goal, then pick a start within max_dist,
  preferring (1) a different non-hallway room, then (2) any non-hallway
  node, then (3) any nearby node.

  Returns:
    (start_node_ids, end_node_ids, dists, pred_maps, paths).
  """
  # Sample one of the rooms, compute the distance field. Pick a destination in
  # another room if possible otherwise anywhere outside this room.
  dists = []; pred_maps = []; paths = []; start_node_ids = [];
  end_node_ids = [];
  # Room ids >= 0 are real rooms; -1 marks hallway / unlabelled nodes.
  room_ids = np.unique(node_room_ids[node_room_ids[:, 0] >= 0, 0])
  for i in range(batch_size):
    room_id = rng.choice(room_ids)
    end_node_id = rng.choice(np.where(node_room_ids[:, 0] == room_id)[0])
    end_node_ids.append(end_node_id)

    # Compute distances toward the goal (reversed graph view).
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())
    dists.append(dist)
    pred_maps.append(pred_map)

    # Randomly sample nodes which are within max_dist.
    near_ids = dist <= max_dist
    near_ids = near_ids[:, np.newaxis]

    # Check to see if there is a non-negative node which is close enough.
    non_same_room_ids = node_room_ids != room_id
    non_hallway_ids = node_room_ids != -1
    good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids,
                                                        non_hallway_ids))
    good2_ids = np.logical_and(near_ids, non_hallway_ids)
    good3_ids = near_ids
    if np.any(good1_ids):
      start_node_id = rng.choice(np.where(good1_ids)[0])
    elif np.any(good2_ids):
      start_node_id = rng.choice(np.where(good2_ids)[0])
    elif np.any(good3_ids):
      start_node_id = rng.choice(np.where(good3_ids)[0])
    else:
      # NOTE(review): on this branch start_node_id keeps its previous value
      # (or is unbound on the first iteration) -- presumably unreachable in
      # practice; confirm.
      logging.error('Did not find any good nodes.')
    start_node_ids.append(start_node_id)

    path = None
    if compute_path:
      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
    paths.append(path)
  return start_node_ids, end_node_ids, dists, pred_maps, paths
def rng_target_dist_field(batch_size, gtG, rng, max_dist, max_dist_to_compute,
                          nodes=None, compute_path=False):
  """Samples goal nodes and starts within max_dist of each goal.

  Draws `batch_size` distinct goal vertices, computes the distance field
  toward each (reversed graph), and samples one start node among those within
  max_dist of the goal.

  Returns:
    (start_node_ids, end_node_ids, dists, pred_maps, paths).
  """
  # Sample a single node, compute distance to all nodes less than max_dist,
  # sample nodes which are a particular distance away.
  dists = []; pred_maps = []; paths = []; start_node_ids = []
  end_node_ids = rng.choice(gtG.num_vertices(), size=(batch_size,),
                            replace=False).tolist()
  for i in range(batch_size):
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_ids[i]),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())
    dists.append(dist)
    pred_maps.append(pred_map)

    # Randomly sample nodes which are within max_dist of this goal.
    near_ids = np.where(dist <= max_dist)[0]
    start_node_id = rng.choice(near_ids, size=(1,), replace=False)[0]
    start_node_ids.append(start_node_id)

    path = None
    if compute_path:
      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
    paths.append(path)
  return start_node_ids, end_node_ids, dists, pred_maps, paths
research/cognitive_mapping_and_planning/src/map_utils.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function to compute the ground truth map for training etc.
"""
import
copy
import
skimage.morphology
import
logging
import
numpy
as
np
import
scipy.ndimage
import
matplotlib.pyplot
as
plt
import
PIL
import
src.utils
as
utils
import
cv2
def
_get_xy_bounding_box
(
vertex
,
padding
):
"""Returns the xy bounding box of the environment."""
min_
=
np
.
floor
(
np
.
min
(
vertex
[:,
:
2
],
axis
=
0
)
-
padding
).
astype
(
np
.
int
)
max_
=
np
.
ceil
(
np
.
max
(
vertex
[:,
:
2
],
axis
=
0
)
+
padding
).
astype
(
np
.
int
)
return
min_
,
max_
def
_project_to_map
(
map
,
vertex
,
wt
=
None
,
ignore_points_outside_map
=
False
):
"""Projects points to map, returns how many points are present at each
location."""
num_points
=
np
.
zeros
((
map
.
size
[
1
],
map
.
size
[
0
]))
vertex_
=
vertex
[:,
:
2
]
-
map
.
origin
vertex_
=
np
.
round
(
vertex_
/
map
.
resolution
).
astype
(
np
.
int
)
if
ignore_points_outside_map
:
good_ind
=
np
.
all
(
np
.
array
([
vertex_
[:,
1
]
>=
0
,
vertex_
[:,
1
]
<
map
.
size
[
1
],
vertex_
[:,
0
]
>=
0
,
vertex_
[:,
0
]
<
map
.
size
[
0
]]),
axis
=
0
)
vertex_
=
vertex_
[
good_ind
,
:]
if
wt
is
not
None
:
wt
=
wt
[
good_ind
,
:]
if
wt
is
None
:
np
.
add
.
at
(
num_points
,
(
vertex_
[:,
1
],
vertex_
[:,
0
]),
1
)
else
:
assert
(
wt
.
shape
[
0
]
==
vertex
.
shape
[
0
]),
\
'number of weights should be same as vertices.'
np
.
add
.
at
(
num_points
,
(
vertex_
[:,
1
],
vertex_
[:,
0
]),
wt
)
return
num_points
def make_map(padding, resolution, vertex=None, sc=1.):
  """Returns a map structure (utils.Foo with origin/size/max/resolution/padding).

  The extent is the padded bounding box of `vertex * sc`, snapped outward so
  the size is an integer number of `resolution`-sized cells.
  """
  min_, max_ = _get_xy_bounding_box(vertex * sc, padding=padding)
  # Number of cells needed to cover the padded extent, rounded up.
  sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32)
  # Snap the max corner to the cell grid implied by (origin, size, resolution).
  max_ = min_ + sz * resolution - 1
  map_struct = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution,
                         padding=padding)
  return map_struct
def
_fill_holes
(
img
,
thresh
):
"""Fills holes less than thresh area (assumes 4 connectivity when computing
hole area."""
l
,
n
=
scipy
.
ndimage
.
label
(
np
.
logical_not
(
img
))
img_
=
img
==
True
cnts
=
np
.
bincount
(
l
.
reshape
(
-
1
))
for
i
,
cnt
in
enumerate
(
cnts
):
if
cnt
<
thresh
:
l
[
l
==
i
]
=
-
1
img_
[
l
==
-
1
]
=
True
return
img_
def compute_traversibility(map, robot_base, robot_height, robot_radius,
                           valid_min, valid_max, num_point_threshold, shapess,
                           sc=100., n_samples_per_face=200):
  """Returns a bit map with pixels that are traversible or not as long as the
  robot center is inside this volume we are good colisions can be detected by
  doing a line search on things, or walking from current location to final
  location in the bitmap, or doing bwlabel on the traversibility map.

  Args:
    map: map structure (origin/size/resolution) that points are binned into.
    robot_base, robot_height: z-band in which mesh points count as obstacles.
    robot_radius: robot footprint radius, used to dilate obstacles.
    valid_min, valid_max: z-band in which points count as valid floor space.
    num_point_threshold: minimum weighted point count for a cell to count.
    shapess: iterable of mesh-shape containers exposing
      get_number_of_meshes() and sample_points_on_face_of_shape() -- project
      API, see its definition for point/weight semantics.
    sc: scale passed through to the mesh sampler.
    n_samples_per_face: samples drawn per mesh face.

  Returns:
    Deep copy of `map` with num_obstcale_points, num_points, traversible,
    obstacle_free and valid_space fields attached.
  """
  tt = utils.Timer()
  tt.tic()
  num_obstcale_points = np.zeros((map.size[1], map.size[0]))
  num_points = np.zeros((map.size[1], map.size[0]))

  for i, shapes in enumerate(shapess):
    for j in range(shapes.get_number_of_meshes()):
      # Sample surface points; each sample is weighted by its face area so
      # the accumulated counts approximate surface area per cell.
      p, face_areas, face_idx = shapes.sample_points_on_face_of_shape(
          j, n_samples_per_face, sc)
      wt = face_areas[face_idx] / n_samples_per_face

      # Points within the robot's vertical extent are obstacles.
      ind = np.all(np.concatenate(
          (p[:, [2]] > robot_base, p[:, [2]] < robot_base + robot_height),
          axis=1), axis=1)
      num_obstcale_points += _project_to_map(map, p[ind, :], wt[ind])

      # Points within the valid z-band indicate observed floor space.
      ind = np.all(np.concatenate(
          (p[:, [2]] > valid_min, p[:, [2]] < valid_max),
          axis=1), axis=1)
      num_points += _project_to_map(map, p[ind, :], wt[ind])

  # Dilate obstacles by the robot radius (in cells) so the robot center can
  # be treated as a point; small holes (< 20 cells) are filled first.
  selem = skimage.morphology.disk(robot_radius / map.resolution)
  obstacle_free = skimage.morphology.binary_dilation(
      _fill_holes(num_obstcale_points > num_point_threshold, 20), selem) != True
  valid_space = _fill_holes(num_points > num_point_threshold, 20)
  # Traversible = obstacle-free AND observed valid floor.
  traversible = np.all(np.concatenate((obstacle_free[..., np.newaxis],
                                       valid_space[..., np.newaxis]),
                                      axis=2), axis=2)
  # plt.imshow(np.concatenate((obstacle_free, valid_space, traversible), axis=1))
  # plt.show()

  map_out = copy.deepcopy(map)
  map_out.num_obstcale_points = num_obstcale_points
  map_out.num_points = num_points
  map_out.traversible = traversible
  map_out.obstacle_free = obstacle_free
  map_out.valid_space = valid_space
  tt.toc(log_at=1, log_str='src.map_utils.compute_traversibility: ')
  return map_out
def resize_maps(map, map_scales, resize_method):
  """Resizes `map` by each scale factor in map_scales.

  Args:
    map: 2D array to resize.
    map_scales: iterable of scale factors.
    resize_method: 'antialiasing' (PIL anti-aliased resize, output clamped to
      [0, 1]) or 'linear_noantialiasing' (plain cv2 bilinear).

  Returns:
    List of resized arrays, one per scale.
  """
  scaled_maps = []
  for i, sc in enumerate(map_scales):
    if resize_method == 'antialiasing':
      # Resize using open cv so that we can compute the size.
      # Use PIL resize to use anti aliasing feature.
      map_ = cv2.resize(map * 1, None, None, fx=sc, fy=sc,
                        interpolation=cv2.INTER_LINEAR)
      w = map_.shape[1]
      h = map_.shape[0]
      map_img = PIL.Image.fromarray((map * 255).astype(np.uint8))
      # Bug fix: Image.ANTIALIAS was an alias of Image.LANCZOS and was
      # removed in Pillow 10; LANCZOS selects the identical filter.
      map__img = map_img.resize((w, h), PIL.Image.LANCZOS)
      map_ = np.asarray(map__img).astype(np.float32)
      map_ = map_ / 255.
      map_ = np.minimum(map_, 1.0)
      map_ = np.maximum(map_, 0.0)
    elif resize_method == 'linear_noantialiasing':
      map_ = cv2.resize(map * 1, None, None, fx=sc, fy=sc,
                        interpolation=cv2.INTER_LINEAR)
    else:
      logging.error('Unknown resizing method')
    scaled_maps.append(map_)
  return scaled_maps
def pick_largest_cc(traversible):
  """Returns a boolean mask selecting the largest connected component."""
  labels = scipy.ndimage.label(traversible)[0]
  # Component sizes, skipping label 0 (the background).
  component_sizes = np.bincount(labels.reshape(-1))[1:]
  largest_label = np.argmax(component_sizes) + 1
  return labels == largest_label
def get_graph_origin_loc(rng, traversible):
  """Erode the traversibility mask so that we get points in the bulk of the
  graph, and not end up with a situation where the graph is localized in the
  corner of a cramped room. Output Locs is in the coordinate frame of the
  map."""
  # Erode by a 15x15 footprint, then keep only the largest eroded component.
  # NOTE(review): the `selem=` keyword was renamed `footprint=` in newer
  # scikit-image releases -- confirm the pinned version.
  eroded = skimage.morphology.binary_erosion(traversible == True,
                                             selem=np.ones((15, 15)))
  bulk = pick_largest_cc(eroded)
  ys, xs = np.where(bulk > 0)
  pick = rng.choice(ys.size)
  locs = np.array([xs[pick], ys[pick]])
  # Jitter uniformly within the chosen cell.
  locs = locs + rng.rand(*(locs.shape)) - 0.5
  return locs
def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc,
                             x_axis, y_axis, theta):
  """Crops an egocentric window from each scaled map at the given pose.

  `theta` is accepted for interface compatibility; the orientation is fully
  determined by x_axis / y_axis here.
  """
  maps = []
  for map_, scale, crop_size in zip(scaled_maps, map_scales, map_crop_sizes):
    window = np.array(get_map_to_predict(loc * scale, x_axis, y_axis, map_,
                                         crop_size,
                                         interpolation=cv2.INTER_LINEAR)[0])
    # Samples falling outside the map come back as NaN from the warp.
    window[np.isnan(window)] = 0
    maps.append(window)
  return maps
def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist,
                         goal_theta, rel_goal_orientation):
  """Renders per-scale goal images by bilinearly splatting each goal.

  For every map scale, builds a (batch, crop, crop, n_ori) image whose mass
  (summing to 1 for in-bounds goals) is spread bilinearly over the four
  pixels around the goal's egocentric position, in the channel given by the
  goal's relative orientation.

  Args:
    map_scales, map_crop_sizes: parallel lists of scale factors / crop sizes.
    n_ori: number of orientation channels.
    goal_dist, goal_theta, rel_goal_orientation: (batch, 1) arrays with the
      goal's polar position and relative orientation index.

  Returns:
    List (one per scale) of float32 (batch, crop, crop, n_ori) arrays.
  """
  goal_dist = goal_dist[:, 0]
  goal_theta = goal_theta[:, 0]
  rel_goal_orientation = rel_goal_orientation[:, 0]

  goals = [];
  # Generate the map images.
  for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)):
    goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size,
                       n_ori), dtype=np.float32)
    # Polar -> egocentric pixel coordinates, centered in the crop.
    x = goal_dist * np.cos(goal_theta) * sc + (map_crop_size - 1.) / 2.
    y = goal_dist * np.sin(goal_theta) * sc + (map_crop_size - 1.) / 2.

    for j in range(goal_dist.shape[0]):
      gc = rel_goal_orientation[j]
      # Integer corners surrounding the continuous goal location.
      x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1;
      y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1;
      # Bilinear weights, written only to in-bounds corners.
      if x0 >= 0 and x0 <= map_crop_size - 1:
        if y0 >= 0 and y0 <= map_crop_size - 1:
          goal_i[j, y0, x0, gc] = (x1 - x[j]) * (y1 - y[j])
        if y1 >= 0 and y1 <= map_crop_size - 1:
          goal_i[j, y1, x0, gc] = (x1 - x[j]) * (y[j] - y0)
      if x1 >= 0 and x1 <= map_crop_size - 1:
        if y0 >= 0 and y0 <= map_crop_size - 1:
          goal_i[j, y0, x1, gc] = (x[j] - x0) * (y1 - y[j])
        if y1 >= 0 and y1 <= map_crop_size - 1:
          goal_i[j, y1, x1, gc] = (x[j] - x0) * (y[j] - y0)
    goals.append(goal_i)
  return goals
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
                       interpolation=cv2.INTER_LINEAR):
  """Warps `map` into egocentric crops for a batch of source poses.

  Each source pose (location plus x/y axes) is mapped onto a canonical
  map_size x map_size frame whose center faces "up" (theta = pi/2). Pixels
  with no source data are NaN in the crop and False in the validity mask.

  Args:
    src_locs: (N, 2) source locations.
    src_x_axiss, src_y_axiss: (N, 2) source frame axes.
    map: 2D array to crop from.
    map_size: output crop side length in pixels.
    interpolation: cv2 interpolation flag for the affine warp.

  Returns:
    (fss, valids): lists of N warped crops and boolean validity masks.
  """
  fss = []
  valids = []

  center = (map_size - 1.0) / 2.0
  dst_theta = np.pi / 2.0
  dst_loc = np.array([center, center])
  dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
  dst_y_axis = np.array([np.cos(dst_theta + np.pi / 2),
                         np.sin(dst_theta + np.pi / 2)])

  def compute_points(center, x_axis, y_axis):
    # Three non-collinear points (origin, origin+x, origin+y) fully determine
    # the affine transform between the frames.
    points = np.zeros((3, 2), dtype=np.float32)
    points[0, :] = center
    points[1, :] = center + x_axis
    points[2, :] = center + y_axis
    return points

  dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
  for i in range(src_locs.shape[0]):
    src_loc = src_locs[i, :]
    src_x_axis = src_x_axiss[i, :]
    src_y_axis = src_y_axiss[i, :]
    src_points = compute_points(src_loc, src_x_axis, src_y_axis)
    M = cv2.getAffineTransform(src_points, dst_points)

    # Bug fix: np.NaN was an alias removed in NumPy 2.0; np.nan is canonical.
    fs = cv2.warpAffine(map, M, (map_size, map_size), None,
                        flags=interpolation, borderValue=np.nan)
    valid = np.invert(np.isnan(fs))
    valids.append(valid)
    fss.append(fs)
  return fss, valids
research/cognitive_mapping_and_planning/src/rotation_utils.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating and applying rotation matrices.
"""
import
numpy
as
np
ANGLE_EPS
=
0.001
def normalize(v):
  """Scales vector `v` to unit Euclidean length."""
  length = np.linalg.norm(v)
  return v / length
def get_r_matrix(ax_, angle):
  """Rodrigues rotation: 3x3 matrix rotating by `angle` about axis `ax_`."""
  axis = normalize(ax_)
  if np.abs(angle) <= ANGLE_EPS:
    # Rotation too small to matter numerically; return the identity.
    return np.eye(3)
  # Skew-symmetric cross-product matrix of the unit axis.
  s_hat = np.array([[0.0, -axis[2], axis[1]],
                    [axis[2], 0.0, -axis[0]],
                    [-axis[1], axis[0], 0.0]], dtype=np.float32)
  return (np.eye(3) + np.sin(angle) * s_hat +
          (1 - np.cos(angle)) * np.linalg.matrix_power(s_hat, 2))
def r_between(v_from_, v_to_):
  """Rotation matrix carrying direction v_from_ onto direction v_to_."""
  src = normalize(v_from_)
  dst = normalize(v_to_)
  # Rotate about the mutual normal by the angle between the two directions.
  rotation_axis = normalize(np.cross(src, dst))
  rotation_angle = np.arccos(np.dot(src, dst))
  return get_r_matrix(rotation_axis, rotation_angle)
def rotate_camera_to_point_at(up_from, lookat_from, up_to, lookat_to):
  """Returns the rotation aligning a camera's lookat and up directions.

  First rotates lookat_from onto lookat_to (r1), then rotates about the new
  lookat axis to align the camera's x (right) axis with the one implied by
  (lookat_to, up_to) (r2). Returns r2 @ r1.
  """
  inputs = [up_from, lookat_from, up_to, lookat_to]
  for i in range(4):
    # Flatten and normalize every input direction.
    inputs[i] = normalize(np.array(inputs[i]).reshape((-1,)))
  up_from, lookat_from, up_to, lookat_to = inputs
  r1 = r_between(lookat_from, lookat_to)

  # Where r1 sends the camera's x axis, vs. where it should end up.
  new_x = np.dot(r1, np.array([1, 0, 0]).reshape((-1, 1))).reshape((-1))
  to_x = normalize(np.cross(lookat_to, up_to))
  angle = np.arccos(np.dot(new_x, to_x))
  if angle > ANGLE_EPS:
    if angle < np.pi - ANGLE_EPS:
      # Sign of the roll depends on which side of the lookat axis the
      # correction axis falls.
      ax = normalize(np.cross(new_x, to_x))
      flip = np.dot(lookat_to, ax)
      if flip > 0:
        r2 = get_r_matrix(lookat_to, angle)
      elif flip < 0:
        r2 = get_r_matrix(lookat_to, -1. * angle)
    else:
      # Angle of rotation is too close to 180 degrees, direction of rotation
      # does not matter.
      r2 = get_r_matrix(lookat_to, angle)
  else:
    r2 = np.eye(3)
  return np.dot(r2, r1)
research/cognitive_mapping_and_planning/src/utils.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r
"""Generaly Utilities.
"""
import
numpy
as
np
,
cPickle
,
os
,
time
from
six.moves
import
xrange
import
src.file_utils
as
fu
import
logging
class Timer():
  """Accumulating wall-clock timer: tic() starts a section, toc() closes it
  and updates running totals, optionally logging at a call/time cadence."""

  def __init__(self):
    self.calls = 0.
    self.start_time = 0.
    self.time_per_call = 0.
    self.total_time = 0.
    self.last_log_time = 0.

  def tic(self):
    """Marks the start of a timed section."""
    self.start_time = time.time()

  def toc(self, average=True, log_at=-1, log_str='', type='calls'):
    """Closes a timed section.

    Args:
      average: return the mean seconds per call if True, else this call's
        elapsed seconds.
      log_at: log every `log_at` calls ('calls') or seconds ('time');
        non-positive disables logging.
      log_str: prefix for the log message.
      type: 'calls' or 'time' logging cadence.
    """
    if self.start_time == 0:
      logging.error('Timer not started by calling tic().')
    now = time.time()
    elapsed = time.time() - self.start_time
    self.total_time += elapsed
    self.calls += 1.
    self.time_per_call = self.total_time / self.calls

    logging_enabled = log_at > 0
    if logging_enabled and type == 'calls' and np.mod(self.calls, log_at) == 0:
      logging.info('%s: %f seconds.', log_str, self.time_per_call)
    elif logging_enabled and type == 'time' and \
        now - self.last_log_time >= log_at:
      logging.info('%s: %f seconds.', log_str, self.time_per_call)
      self.last_log_time = now

    return self.time_per_call if average else elapsed
class Foo(object):
  """Generic attribute bag: Foo(a=1, b=2) stores all kwargs as attributes."""

  def __init__(self, **kwargs):
    # Copy every keyword argument straight into the instance namespace.
    self.__dict__.update(kwargs)

  def __str__(self):
    """Renders one 'name: value' line per attribute."""
    str_ = ''
    for v in vars(self).keys():
      a = getattr(self, v)
      if True: #isinstance(v, object):
        str__ = str(a)
        # NOTE(review): both replace() arguments render identically here,
        # making this a no-op; the original likely re-indented embedded
        # newlines (e.g. '\n' -> '\n  ') and the indent was lost in the diff
        # extraction -- confirm against the upstream source.
        str__ = str__.replace('\n', '\n')
      else:
        str__ = str(a)
      str_ += '{:s}: {:s}'.format(v, str__)
      str_ += '\n'
    return str_
def dict_equal(dict1, dict2):
  """Assert that two dictionaries hold identical contents.

  Key sets must match; each pair of values must have the same exact type.
  numpy arrays are compared by dtype plus np.allclose; everything else with
  `==`. Raises AssertionError on the first mismatch, returns True otherwise.
  """
  keys_match = set(dict1.keys()) == set(dict2.keys())
  assert keys_match, "Sets of keys between 2 dictionaries are different."
  for key in dict1.keys():
    first, second = dict1[key], dict2[key]
    same_type = type(first) == type(second)
    assert same_type, "Type of key '{:s}' if different.".format(key)
    if type(first) != np.ndarray:
      assert first == second, "Value for key '{:s}' do not match.".format(key)
    else:
      assert first.dtype == second.dtype, \
          "Numpy Type of key '{:s}' if different.".format(key)
      assert np.allclose(first, second), \
          "Value for key '{:s}' do not match.".format(key)
  return True
def subplot(plt, Y_X, sz_y_sz_x=(10, 10)):
  """Create a Y-by-X grid of subplots with a figure size scaled to the grid.

  Args:
    plt: the pyplot module (passed in, not imported here).
    Y_X: (rows, cols) tuple.
    sz_y_sz_x: per-subplot (height, width) in inches.

  Returns:
    (fig, axes) as produced by plt.subplots.
  """
  n_rows, n_cols = Y_X
  cell_h, cell_w = sz_y_sz_x
  # Overall figure size scales with the grid dimensions.
  plt.rcParams['figure.figsize'] = (n_cols * cell_w, n_rows * cell_h)
  fig, axes = plt.subplots(n_rows, n_cols)
  plt.subplots_adjust(wspace=0.1, hspace=0.1)
  return fig, axes
def tic_toc_print(interval, string):
  """Rate-limited print: emits `string` at most once every `interval` seconds.

  Uses the module-global `tic_toc_print_time_old` as the timestamp of the
  last print. The very first call always prints (and seeds the global).
  """
  global tic_toc_print_time_old
  if 'tic_toc_print_time_old' not in globals():
    # First ever call: initialize the timestamp and print unconditionally.
    tic_toc_print_time_old = time.time()
    print(string)
  else:
    new_time = time.time()
    if new_time - tic_toc_print_time_old > interval:
      # Enough time has passed since the last emission; print and reset.
      tic_toc_print_time_old = new_time;
      print(string)
def mkdir_if_missing(output_dir):
  """Create output_dir (via the file-utils backend) unless it already exists."""
  if fu.exists(output_dir):
    return
  fu.makedirs(output_dir)
def save_variables(pickle_file_name, var, info, overwrite=False):
  """Pickle a list of values under a list of names.

  Builds {info[i]: var[i]} for every element of `var` and writes it with
  cPickle at the highest protocol.

  Args:
    pickle_file_name: destination path (opened through fu.fopen).
    var: list of objects to save.
    info: list of names, parallel to `var`; extra trailing names are ignored,
      matching the original indexing behavior.
    overwrite: when False, refuse to clobber an existing file.

  Raises:
    Exception: if the file exists and overwrite is False.
  """
  if fu.exists(pickle_file_name) and not overwrite:
    raise Exception('{:s} exists and over write is false.'
                    .format(pickle_file_name))
  # Construct the dictionary (dict comprehension replaces the py2 xrange
  # index loop; same key/value pairing).
  assert type(var) == list
  assert type(info) == list
  d = {info[i]: var[i] for i in range(len(var))}
  # NOTE(review): mode 'w' (text) is what the original used; pickle normally
  # needs 'wb' on py3 — presumably fu.fopen handles this, verify against
  # src.file_utils before changing.
  with fu.fopen(pickle_file_name, 'w') as f:
    cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL)
def load_variables(pickle_file_name):
  """Load and return the dictionary written by save_variables.

  Args:
    pickle_file_name: path to the pickle file (opened through fu.fopen).

  Raises:
    Exception: if the file does not exist.
  """
  if not fu.exists(pickle_file_name):
    raise Exception('{:s} does not exists.'.format(pickle_file_name))
  with fu.fopen(pickle_file_name, 'r') as f:
    return cPickle.load(f)
def voc_ap(rec, prec):
  """PASCAL-VOC style average precision from recall/precision vectors.

  Pads the curves with sentinel endpoints, enforces a monotonically
  non-increasing precision envelope (right to left), then sums precision over
  the recall change points. Returns a one-element numpy array (or the int 0
  when recall never changes).
  """
  rec = rec.reshape((-1, 1))
  prec = prec.reshape((-1, 1))
  zero = np.zeros((1, 1))
  one = np.ones((1, 1))
  mrec = np.vstack((zero, rec, one))
  mpre = np.vstack((zero, prec, zero))
  # Precision envelope: each point takes the max of everything to its right.
  for idx in reversed(range(len(mpre) - 1)):
    mpre[idx] = np.maximum(mpre[idx], mpre[idx + 1])
  # Indices where recall changes; each contributes one rectangle to the area.
  change_points = np.where(mrec[1:] != mrec[0:-1])[0] + 1
  ap = 0
  for idx in change_points:
    ap = ap + (mrec[idx] - mrec[idx - 1]) * mpre[idx]
  return ap
def tight_imshow_figure(plt, figsize=None):
  """Create a figure with a single borderless axis covering the full canvas.

  Intended for imshow-style output with no ticks, margins, or padding.

  Args:
    plt: the pyplot module (passed in, not imported here).
    figsize: optional (width, height) forwarded to plt.figure.

  Returns:
    (fig, ax).
  """
  fig = plt.figure(figsize=figsize)
  full_canvas = [0, 0, 1, 1]
  ax = plt.Axes(fig, full_canvas)
  ax.set_axis_off()
  fig.add_axes(ax)
  return fig, ax
def calc_pr(gt, out, wt=None):
  """Precision/recall curves and average precision from scores and labels.

  Args:
    gt: ground-truth labels, reshaped to a float64 column. Presumably 0/1
      indicators — confirm with callers.
    out: predicted scores, same number of elements as gt.
    wt: optional per-example weights; defaults to all ones.

  Returns:
    (ap, rec, prec) where rec/prec are cumulative curves over examples sorted
    by descending score, and ap comes from voc_ap.
  """
  if wt is None:
    wt = np.ones((gt.size, 1))

  # Work on float64 column vectors throughout.
  gt = gt.astype(np.float64).reshape((-1, 1))
  wt = wt.astype(np.float64).reshape((-1, 1))
  out = out.astype(np.float64).reshape((-1, 1))

  gt = gt * wt
  together = np.concatenate([gt, wt, out], axis=1) * 1.
  # Sort all three columns jointly by descending score.
  order = np.argsort(together[:, 2], axis=0)[::-1]
  together = together[order, :]

  cum_gt = np.cumsum(together[:, 0])
  cum_wt = np.cumsum(together[:, 1])
  prec = cum_gt / cum_wt
  rec = cum_gt / np.sum(together[:, 0])

  ap = voc_ap(rec, prec)
  return ap, rec, prec
research/cognitive_mapping_and_planning/tfcode/__init__.py
deleted
100644 → 0
View file @
09bc9f54
research/cognitive_mapping_and_planning/tfcode/cmp.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for setting up the network for CMP.
Sets up the mapper and the planner.
"""
import
sys
,
os
,
numpy
as
np
import
matplotlib.pyplot
as
plt
import
copy
import
argparse
,
pprint
import
time
import
tensorflow
as
tf
from
tensorflow.contrib
import
slim
from
tensorflow.contrib.slim
import
arg_scope
import
logging
from
tensorflow.python.platform
import
app
from
tensorflow.python.platform
import
flags
from
src
import
utils
import
src.file_utils
as
fu
import
tfcode.nav_utils
as
nu
import
tfcode.cmp_utils
as
cu
import
tfcode.cmp_summary
as
cmp_s
from
tfcode
import
tf_utils
# Short local aliases for helpers used throughout this module, pulled from the
# CMP utility modules (cu: tfcode.cmp_utils, nu: tfcode.nav_utils,
# cmp_s: tfcode.cmp_summary).
value_iteration_network = cu.value_iteration_network
rotate_preds = cu.rotate_preds
deconv = cu.deconv
get_visual_frustum = cu.get_visual_frustum
fr_v2 = cu.fr_v2

setup_train_step_kwargs = nu.default_train_step_kwargs
compute_losses_multi_or = nu.compute_losses_multi_or
get_repr_from_image = nu.get_repr_from_image

_save_d_at_t = nu.save_d_at_t
_save_all = nu.save_all
_eval_ap = nu.eval_ap
_eval_dist = nu.eval_dist
_plot_trajectories = nu.plot_trajectories

_vis_readout_maps = cmp_s._vis_readout_maps
_vis = cmp_s._vis
_summary_vis = cmp_s._summary_vis
_summary_readout_maps = cmp_s._summary_readout_maps
_add_summaries = cmp_s._add_summaries
def _inputs(problem):
  """Declare the placeholder inputs for the CMP network.

  Builds three dictionaries of input tensors via tf_utils.setup_inputs:
  'common' (map and goal, fixed per episode), 'step' (per-step observations
  and recurrent map state), and 'train' (the supervision targets; it also
  aggregates the other two). Shapes with None are variable-length (time) axes.
  """
  # Set up inputs.
  with tf.name_scope('inputs'):
    # Episode-level inputs: full environment map and goal location(s).
    inputs = []
    inputs.append(('orig_maps', tf.float32,
                   (problem.batch_size, 1, None, None, 1)))
    inputs.append(('goal_loc', tf.float32,
                   (problem.batch_size, problem.num_goals, 2)))
    common_input_data, _ = tf_utils.setup_inputs(inputs)

    inputs = []
    if problem.input_type == 'vision':
      # Multiple images from an array of cameras.
      inputs.append(('imgs', tf.float32,
                     (problem.batch_size, None,
                      len(problem.aux_delta_thetas) + 1,
                      problem.img_height, problem.img_width,
                      problem.img_channels)))
    elif problem.input_type == 'analytical_counts':
      # Precomputed per-scale egocentric count maps instead of raw images.
      for i in range(len(problem.map_crop_sizes)):
        inputs.append(('analytical_counts_{:d}'.format(i), tf.float32,
                       (problem.batch_size, None, problem.map_crop_sizes[i],
                        problem.map_crop_sizes[i], problem.map_channels)))

    if problem.outputs.readout_maps:
      # Ground-truth readout maps, one per readout crop size.
      for i in range(len(problem.readout_maps_crop_sizes)):
        inputs.append(('readout_maps_{:d}'.format(i), tf.float32,
                       (problem.batch_size, None,
                        problem.readout_maps_crop_sizes[i],
                        problem.readout_maps_crop_sizes[i],
                        problem.readout_maps_channels)))

    # Per-scale goal images and the recurrent running-map state
    # (sum numerator / sum denominator / max denominator).
    for i in range(len(problem.map_crop_sizes)):
      inputs.append(('ego_goal_imgs_{:d}'.format(i), tf.float32,
                     (problem.batch_size, None, problem.map_crop_sizes[i],
                      problem.map_crop_sizes[i], problem.goal_channels)))
      for s in ['sum_num', 'sum_denom', 'max_denom']:
        inputs.append(('running_' + s + '_{:d}'.format(i), tf.float32,
                       (problem.batch_size, 1, problem.map_crop_sizes[i],
                        problem.map_crop_sizes[i], problem.map_channels)))

    # Egomotion between steps, used to rotate/translate the running maps.
    inputs.append(('incremental_locs', tf.float32,
                   (problem.batch_size, None, 2)))
    inputs.append(('incremental_thetas', tf.float32,
                   (problem.batch_size, None, 1)))
    inputs.append(('step_number', tf.int32, (1, None, 1)))
    inputs.append(('node_ids', tf.int32,
                   (problem.batch_size, None, problem.node_ids_dim)))
    inputs.append(('perturbs', tf.float32,
                   (problem.batch_size, None, problem.perturbs_dim)))

    # For plotting result plots
    inputs.append(('loc_on_map', tf.float32,
                   (problem.batch_size, None, 2)))
    inputs.append(('gt_dist_to_goal', tf.float32,
                   (problem.batch_size, None, 1)))
    step_input_data, _ = tf_utils.setup_inputs(inputs)

    # Supervision: (multi-hot) expert action at each step.
    inputs = []
    inputs.append(('action', tf.int32,
                   (problem.batch_size, None, problem.num_actions)))
    train_data, _ = tf_utils.setup_inputs(inputs)
    # 'train' is a superset holding step and common tensors as well.
    train_data.update(step_input_data)
    train_data.update(common_input_data)
  return common_input_data, step_input_data, train_data
def readout_general(multi_scale_belief, num_neurons, strides, layers_per_block,
                    kernel_size, batch_norm_is_training_op, wt_decay):
  """Decode readout maps from the (gradient-stopped) multi-scale belief.

  Runs a stack of transposed convolutions over the belief and applies a
  sigmoid to produce per-pixel probabilities. The stop_gradient ensures the
  readout head does not train the mapper/planner beneath it.

  Returns:
    (logits, probs) tensors of the deconv output.
  """
  multi_scale_belief = tf.stop_gradient(multi_scale_belief)
  with tf.variable_scope('readout_maps_deconv'):
    x, outs = deconv(multi_scale_belief, batch_norm_is_training_op,
                     wt_decay=wt_decay, neurons=num_neurons, strides=strides,
                     layers_per_block=layers_per_block,
                     kernel_size=kernel_size, conv_fn=slim.conv2d_transpose,
                     offset=0, name='readout_maps_deconv')
    probs = tf.sigmoid(x)
  return x, probs
def running_combine(fss_logits, confs_probs, incremental_locs,
                    incremental_thetas, previous_sum_num, previous_sum_denom,
                    previous_max_denom, map_size, num_steps):
  """Accumulate per-step egocentric map predictions into running estimates.

  At each of `num_steps` time steps the previous running maps are first
  re-registered into the current egocentric frame via rotate_preds (using the
  incremental egomotion), then updated with the current confidence-weighted
  free-space logits. Returns the per-step stacked running sums/max so every
  intermediate state is available downstream.
  """
  # fss_logits is B x N x H x W x C
  # confs_logits is B x N x H x W x C
  # incremental_locs is B x N x 2
  # incremental_thetas is B x N x 1
  # previous_sum_num etc is B x 1 x H x W x C
  with tf.name_scope('combine_{:d}'.format(num_steps)):
    running_sum_nums_ = []; running_sum_denoms_ = [];
    running_max_denoms_ = [];

    # Split the time axis into per-step tensors.
    fss_logits_ = tf.unstack(fss_logits, axis=1, num=num_steps)
    confs_probs_ = tf.unstack(confs_probs, axis=1, num=num_steps)
    incremental_locs_ = tf.unstack(incremental_locs, axis=1, num=num_steps)
    incremental_thetas_ = tf.unstack(incremental_thetas, axis=1,
                                     num=num_steps)
    running_sum_num = tf.unstack(previous_sum_num, axis=1, num=1)[0]
    running_sum_denom = tf.unstack(previous_sum_denom, axis=1, num=1)[0]
    running_max_denom = tf.unstack(previous_max_denom, axis=1, num=1)[0]

    for i in range(num_steps):
      # Rotate the previous running_num and running_denom
      running_sum_num, running_sum_denom, running_max_denom = rotate_preds(
          incremental_locs_[i], incremental_thetas_[i], map_size,
          [running_sum_num, running_sum_denom, running_max_denom],
          output_valid_mask=False)[0]
      # print i, num_steps, running_sum_num.get_shape().as_list()
      # Weighted accumulation: logits weighted by confidence, plus the
      # normalizers needed to recover an average later.
      running_sum_num = running_sum_num + fss_logits_[i] * confs_probs_[i]
      running_sum_denom = running_sum_denom + confs_probs_[i]
      running_max_denom = tf.maximum(running_max_denom, confs_probs_[i])
      running_sum_nums_.append(running_sum_num)
      running_sum_denoms_.append(running_sum_denom)
      running_max_denoms_.append(running_max_denom)

    # Re-stack along the time axis: B x N x H x W x C.
    running_sum_nums = tf.stack(running_sum_nums_, axis=1)
    running_sum_denoms = tf.stack(running_sum_denoms_, axis=1)
    running_max_denoms = tf.stack(running_max_denoms_, axis=1)
    return running_sum_nums, running_sum_denoms, running_max_denoms
def get_map_from_images(imgs, mapper_arch, task_params, freeze_conv, wt_decay,
                        is_training, batch_norm_is_training_op, num_maps,
                        split_maps=True):
  """Build the mapper: images -> egocentric free-space maps + confidences.

  Pipeline: CNN encoder over all camera views -> optional 1x1 dim-reduce
  conv -> fully connected layers producing a top-view feature grid ->
  transposed convolutions decoding 2*num_maps channels, split into free-space
  logits and (sigmoided) confidence maps.

  Returns:
    A utils.Foo with encoder_output, conv_feat, reshape_conv_feat,
    deconv_output, vars_to_restore and, when split_maps, fss_logits,
    confs_logits, confs_probs.
  """
  # Hit image with a resnet.
  n_views = len(task_params.aux_delta_thetas) + 1
  out = utils.Foo()

  # Collapse (batch, time, view) into one leading axis for the encoder.
  images_reshaped = tf.reshape(imgs,
      shape=[-1, task_params.img_height, task_params.img_width,
             task_params.img_channels], name='re_image')

  x, out.vars_to_restore = get_repr_from_image(
      images_reshaped, task_params.modalities, task_params.data_augment,
      mapper_arch.encoder, freeze_conv, wt_decay, is_training)

  # Reshape into nice things so that these can be accumulated over time steps
  # for faster backprop.
  sh_before = x.get_shape().as_list()
  out.encoder_output = tf.reshape(
      x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:])
  x = tf.reshape(out.encoder_output, shape=[-1] + sh_before[1:])

  # Add a layer to reduce dimensions for a fc layer.
  if mapper_arch.dim_reduce_neurons > 0:
    ks = 1; neurons = mapper_arch.dim_reduce_neurons;
    init_var = np.sqrt(2.0 / (ks ** 2) / neurons)
    batch_norm_param = mapper_arch.batch_norm_param
    batch_norm_param['is_training'] = batch_norm_is_training_op
    out.conv_feat = slim.conv2d(
        x, neurons, kernel_size=ks, stride=1,
        normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_param,
        padding='SAME', scope='dim_reduce',
        weights_regularizer=slim.l2_regularizer(wt_decay),
        weights_initializer=tf.random_normal_initializer(stddev=init_var))
    reshape_conv_feat = slim.flatten(out.conv_feat)
    sh = reshape_conv_feat.get_shape().as_list()
    # Concatenate the features of all views of a time step.
    out.reshape_conv_feat = tf.reshape(reshape_conv_feat,
                                       shape=[-1, sh[1] * n_views])

  with tf.variable_scope('fc'):
    # Fully connected layers to compute the representation in top-view space.
    fc_batch_norm_param = {'center': True, 'scale': True,
                           'activation_fn': tf.nn.relu,
                           'is_training': batch_norm_is_training_op}
    f = out.reshape_conv_feat
    out_neurons = (mapper_arch.fc_out_size ** 2) * mapper_arch.fc_out_neurons
    neurons = mapper_arch.fc_neurons + [out_neurons]
    f, _ = tf_utils.fc_network(
        f, neurons=neurons, wt_decay=wt_decay, name='fc', offset=0,
        batch_norm_param=fc_batch_norm_param, is_training=is_training,
        dropout_ratio=mapper_arch.fc_dropout)
    # Fold the flat fc output back into a spatial grid.
    f = tf.reshape(f, shape=[-1, mapper_arch.fc_out_size,
                             mapper_arch.fc_out_size,
                             mapper_arch.fc_out_neurons], name='re_fc')

  # Use pool5 to predict the free space map via deconv layers.
  with tf.variable_scope('deconv'):
    x, outs = deconv(f, batch_norm_is_training_op, wt_decay=wt_decay,
                     neurons=mapper_arch.deconv_neurons,
                     strides=mapper_arch.deconv_strides,
                     layers_per_block=mapper_arch.deconv_layers_per_block,
                     kernel_size=mapper_arch.deconv_kernel_size,
                     conv_fn=slim.conv2d_transpose, offset=0, name='deconv')

  # Reshape x the right way.
  sh = x.get_shape().as_list()
  x = tf.reshape(x, shape=[task_params.batch_size, -1] + sh[1:])
  out.deconv_output = x

  # Separate out the map and the confidence predictions, pass the confidence
  # through a sigmoid.
  if split_maps:
    with tf.name_scope('split'):
      out_all = tf.split(value=x, axis=4, num_or_size_splits=2 * num_maps)
      out.fss_logits = out_all[:num_maps]
      out.confs_logits = out_all[num_maps:]
    with tf.name_scope('sigmoid'):
      out.confs_probs = [tf.nn.sigmoid(x) for x in out.confs_logits]
  return out
def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode):
  """Assemble the full CMP graph on model container `m`.

  Wires together: input placeholders, the mapper (from images or analytical
  counts), per-scale map accumulation, fR convolutions + value iteration,
  action prediction, optional readout-map head, losses, the training op, and
  summaries. Mutates and returns `m`.
  """
  assert(args.arch.multi_scale), 'removed support for old single scale code.'
  # Set up the model.
  tf.set_random_seed(args.solver.seed)
  task_params = args.navtask.task_params

  batch_norm_is_training_op = \
      tf.placeholder_with_default(batch_norm_is_training, shape=[],
                                  name='batch_norm_is_training_op')

  # Setup the inputs
  m.input_tensors = {}
  m.train_ops = {}
  m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \
      _inputs(task_params)

  m.init_fn = None

  if task_params.input_type == 'vision':
    # Mapper: raw images -> per-scale free-space logits and confidences.
    m.vision_ops = get_map_from_images(
        m.input_tensors['step']['imgs'], args.mapper_arch,
        task_params, args.solver.freeze_conv,
        args.solver.wt_decay, is_training, batch_norm_is_training_op,
        num_maps=len(task_params.map_crop_sizes))

    # Load variables from snapshot if needed.
    if args.solver.pretrained_path is not None:
      m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path,
                                                 m.vision_ops.vars_to_restore)

    # Set up caching of vision features if needed.
    if args.solver.freeze_conv:
      m.train_ops['step_data_cache'] = [m.vision_ops.encoder_output]
    else:
      m.train_ops['step_data_cache'] = []

    # Set up blobs that are needed for the computation in rest of the graph.
    m.ego_map_ops = m.vision_ops.fss_logits
    m.coverage_ops = m.vision_ops.confs_probs

    # Zero pad these to make them same size as what the planner expects.
    for i in range(len(m.ego_map_ops)):
      if args.mapper_arch.pad_map_with_zeros_each[i] > 0:
        paddings = np.zeros((5, 2), dtype=np.int32)
        # Pad only the two spatial axes (H, W) of the B x N x H x W x C map.
        paddings[2:4, :] = args.mapper_arch.pad_map_with_zeros_each[i]
        paddings_op = tf.constant(paddings, dtype=tf.int32)
        m.ego_map_ops[i] = tf.pad(m.ego_map_ops[i], paddings=paddings_op)
        m.coverage_ops[i] = tf.pad(m.coverage_ops[i], paddings=paddings_op)

  elif task_params.input_type == 'analytical_counts':
    # Use precomputed count maps directly; coverage is 1 wherever any
    # channel saw at least one count.
    m.ego_map_ops = []; m.coverage_ops = []
    for i in range(len(task_params.map_crop_sizes)):
      ego_map_op = m.input_tensors['step']['analytical_counts_{:d}'.format(i)]
      coverage_op = tf.cast(
          tf.greater_equal(
              tf.reduce_max(ego_map_op, reduction_indices=[4],
                            keep_dims=True), 1), tf.float32)
      coverage_op = tf.ones_like(ego_map_op) * coverage_op
      m.ego_map_ops.append(ego_map_op)
      m.coverage_ops.append(coverage_op)
    m.train_ops['step_data_cache'] = []

  num_steps = task_params.num_steps
  num_goals = task_params.num_goals

  map_crop_size_ops = []
  for map_crop_size in task_params.map_crop_sizes:
    map_crop_size_ops.append(tf.constant(map_crop_size, dtype=tf.int32,
                                         shape=(2,)))

  with tf.name_scope('check_size'):
    # True when the unrolled time axis has length 1 (stepwise inference).
    is_single_step = tf.equal(tf.unstack(tf.shape(m.ego_map_ops[0]),
                                         num=5)[1], 1)

  fr_ops = []; value_ops = [];
  fr_intermediate_ops = []; value_intermediate_ops = [];
  crop_value_ops = [];
  resize_crop_value_ops = [];
  confs = []; occupancys = [];

  previous_value_op = None
  updated_state = []; state_names = [];

  # One planner stage per map scale; each stage consumes the upsampled value
  # map of the previous (coarser) stage.
  for i in range(len(task_params.map_crop_sizes)):
    map_crop_size = task_params.map_crop_sizes[i]
    with tf.variable_scope('scale_{:d}'.format(i)):
      # Accumulate the map.
      fn = lambda ns: running_combine(
          m.ego_map_ops[i],
          m.coverage_ops[i],
          m.input_tensors['step']['incremental_locs'] *
              task_params.map_scales[i],
          m.input_tensors['step']['incremental_thetas'],
          m.input_tensors['step']['running_sum_num_{:d}'.format(i)],
          m.input_tensors['step']['running_sum_denom_{:d}'.format(i)],
          m.input_tensors['step']['running_max_denom_{:d}'.format(i)],
          map_crop_size, ns)

      running_sum_num, running_sum_denom, running_max_denom = \
          tf.cond(is_single_step, lambda: fn(1),
                  lambda: fn(num_steps * num_goals))

      updated_state += [running_sum_num, running_sum_denom,
                        running_max_denom]
      state_names += ['running_sum_num_{:d}'.format(i),
                      'running_sum_denom_{:d}'.format(i),
                      'running_max_denom_{:d}'.format(i)]

      # Concat the accumulated map and goal
      # 0.001 floor avoids division by zero where nothing was observed.
      occupancy = running_sum_num / tf.maximum(running_sum_denom, 0.001)
      conf = running_max_denom
      # print occupancy.get_shape().as_list()

      # Concat occupancy, how much occupied and goal.
      with tf.name_scope('concat'):
        sh = [-1, map_crop_size, map_crop_size, task_params.map_channels]
        occupancy = tf.reshape(occupancy, shape=sh)
        conf = tf.reshape(conf, shape=sh)

        sh = [-1, map_crop_size, map_crop_size, task_params.goal_channels]
        goal = tf.reshape(
            m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)],
            shape=sh)
        to_concat = [occupancy, conf, goal]

        if previous_value_op is not None:
          to_concat.append(previous_value_op)

        x = tf.concat(to_concat, 3)

      # Pass the map, previous rewards and the goal through a few convolutional
      # layers to get fR.
      fr_op, fr_intermediate_op = fr_v2(
          x, output_neurons=args.arch.fr_neurons,
          inside_neurons=args.arch.fr_inside_neurons,
          is_training=batch_norm_is_training_op, name='fr',
          wt_decay=args.solver.wt_decay, stride=args.arch.fr_stride)

      # Do Value Iteration on the fR
      if args.arch.vin_num_iters > 0:
        value_op, value_intermediate_op = value_iteration_network(
            fr_op, num_iters=args.arch.vin_num_iters,
            val_neurons=args.arch.vin_val_neurons,
            action_neurons=args.arch.vin_action_neurons,
            kernel_size=args.arch.vin_ks,
            share_wts=args.arch.vin_share_wts, name='vin',
            wt_decay=args.solver.wt_decay)
      else:
        value_op = fr_op
        value_intermediate_op = []

      # Crop out and upsample the previous value map.
      remove = args.arch.crop_remove_each
      if remove > 0:
        crop_value_op = value_op[:, remove:-remove, remove:-remove, :]
      else:
        crop_value_op = value_op
      crop_value_op = tf.reshape(crop_value_op,
                                 shape=[-1, args.arch.value_crop_size,
                                        args.arch.value_crop_size,
                                        args.arch.vin_val_neurons])
      if i < len(task_params.map_crop_sizes) - 1:
        # Reshape it to shape of the next scale.
        previous_value_op = tf.image.resize_bilinear(
            crop_value_op, map_crop_size_ops[i + 1], align_corners=True)
        resize_crop_value_ops.append(previous_value_op)

      occupancys.append(occupancy)
      confs.append(conf)
      value_ops.append(value_op)
      crop_value_ops.append(crop_value_op)
      fr_ops.append(fr_op)
      fr_intermediate_ops.append(fr_intermediate_op)

  m.value_ops = value_ops
  m.value_intermediate_ops = value_intermediate_ops
  m.fr_ops = fr_ops
  m.fr_intermediate_ops = fr_intermediate_ops
  m.final_value_op = crop_value_op
  m.crop_value_ops = crop_value_ops
  m.resize_crop_value_ops = resize_crop_value_ops
  m.confs = confs
  m.occupancys = occupancys

  sh = [-1, args.arch.vin_val_neurons * ((args.arch.value_crop_size) ** 2)]
  m.value_features_op = tf.reshape(m.final_value_op, sh,
                                   name='reshape_value_op')

  # Determine what action to take.
  with tf.variable_scope('action_pred'):
    batch_norm_param = args.arch.pred_batch_norm_param
    if batch_norm_param is not None:
      batch_norm_param['is_training'] = batch_norm_is_training_op
    m.action_logits_op, _ = tf_utils.fc_network(
        m.value_features_op, neurons=args.arch.pred_neurons,
        wt_decay=args.solver.wt_decay, name='pred', offset=0,
        num_pred=task_params.num_actions,
        batch_norm_param=batch_norm_param)
    m.action_prob_op = tf.nn.softmax(m.action_logits_op)

  # Zero initial recurrent map state. NOTE(review): uses `map_crop_size` left
  # over from the last loop iteration, i.e. the finest scale, for every state
  # tensor — confirm all scales share that crop size before reusing this.
  init_state = tf.constant(0., dtype=tf.float32, shape=[
      task_params.batch_size, 1, map_crop_size, map_crop_size,
      task_params.map_channels])

  m.train_ops['state_names'] = state_names
  m.train_ops['updated_state'] = updated_state
  m.train_ops['init_state'] = [init_state for _ in updated_state]

  m.train_ops['step'] = m.action_prob_op
  m.train_ops['common'] = [m.input_tensors['common']['orig_maps'],
                           m.input_tensors['common']['goal_loc']]
  m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op

  m.loss_ops = []; m.loss_ops_names = [];

  if args.arch.readout_maps:
    with tf.name_scope('readout_maps'):
      all_occupancys = tf.concat(m.occupancys + m.confs, 3)
      readout_maps, probs = readout_general(
          all_occupancys, num_neurons=args.arch.rom_arch.num_neurons,
          strides=args.arch.rom_arch.strides,
          layers_per_block=args.arch.rom_arch.layers_per_block,
          kernel_size=args.arch.rom_arch.kernel_size,
          batch_norm_is_training_op=batch_norm_is_training_op,
          wt_decay=args.solver.wt_decay)

      gt_ego_maps = [m.input_tensors['step']['readout_maps_{:d}'.format(i)]
                     for i in range(len(task_params.readout_maps_crop_sizes))]
      m.readout_maps_gt = tf.concat(gt_ego_maps, 4)
      gt_shape = tf.shape(m.readout_maps_gt)
      m.readout_maps_logits = tf.reshape(readout_maps, gt_shape)
      m.readout_maps_probs = tf.reshape(probs, gt_shape)

      # Add a loss op
      m.readout_maps_loss_op = tf.losses.sigmoid_cross_entropy(
          tf.reshape(m.readout_maps_gt,
                     [-1, len(task_params.readout_maps_crop_sizes)]),
          tf.reshape(readout_maps,
                     [-1, len(task_params.readout_maps_crop_sizes)]),
          scope='loss')
      # Fixed 10x weight on the readout loss.
      m.readout_maps_loss_op = 10. * m.readout_maps_loss_op

  # EWMA smoothing of accuracy only while training.
  ewma_decay = 0.99 if is_training else 0.0
  weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32,
                        name='weight')
  m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \
      compute_losses_multi_or(
          m.action_logits_op, m.input_tensors['train']['action'],
          weights=weight, num_actions=task_params.num_actions,
          data_loss_wt=args.solver.data_loss_wt,
          reg_loss_wt=args.solver.reg_loss_wt, ewma_decay=ewma_decay)

  if args.arch.readout_maps:
    m.total_loss_op = m.total_loss_op + m.readout_maps_loss_op
    m.loss_ops += [m.readout_maps_loss_op]
    m.loss_ops_names += ['readout_maps_loss']

  m.loss_ops += [m.reg_loss_op, m.data_loss_op, m.total_loss_op]
  m.loss_ops_names += ['reg_loss', 'data_loss', 'total_loss']

  if args.solver.freeze_conv:
    # Do not optimize the pretrained encoder weights.
    vars_to_optimize = list(set(tf.trainable_variables()) -
                            set(m.vision_ops.vars_to_restore))
  else:
    vars_to_optimize = None

  m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \
  m.sync_optimizer = tf_utils.setup_training(
      m.total_loss_op,
      args.solver.initial_learning_rate,
      args.solver.steps_per_decay,
      args.solver.learning_rate_decay,
      args.solver.momentum,
      args.solver.max_steps,
      args.solver.sync,
      args.solver.adjust_lr_sync,
      args.solver.num_workers,
      args.solver.task,
      vars_to_optimize=vars_to_optimize,
      clip_gradient_norm=args.solver.clip_gradient_norm,
      typ=args.solver.typ, momentum2=args.solver.momentum2,
      adam_eps=args.solver.adam_eps)

  # Scheduled-sampling probability of following the ground-truth action.
  if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay':
    m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k,
                                                         m.global_step_op)
  elif args.arch.sample_gt_prob_type == 'zero':
    m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32)
  elif args.arch.sample_gt_prob_type.split('_')[0] == 'step':
    step = int(args.arch.sample_gt_prob_type.split('_')[1])
    m.sample_gt_prob_op = tf_utils.step_gt_prob(
        step, m.input_tensors['step']['step_number'][0, 0, 0])

  m.sample_action_type = args.arch.action_sample_type
  m.sample_action_combine_type = args.arch.action_sample_combine_type

  m.summary_ops = {
      summary_mode: _add_summaries(m, args, summary_mode,
                                   args.summary.arop_full_summary_iters)}

  m.init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
  m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4,
                              write_version=tf.train.SaverDef.V2)
  return m
research/cognitive_mapping_and_planning/tfcode/cmp_summary.py
deleted
100644 → 0
View file @
09bc9f54
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for setting up summaries for CMP.
"""
import
sys
,
os
,
numpy
as
np
import
matplotlib.pyplot
as
plt
import
tensorflow
as
tf
from
tensorflow.contrib
import
slim
from
tensorflow.contrib.slim
import
arg_scope
import
logging
from
tensorflow.python.platform
import
app
from
tensorflow.python.platform
import
flags
from
src
import
utils
import
src.file_utils
as
fu
import
tfcode.nav_utils
as
nu
def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N):
  """Save a figure comparing ground-truth and predicted readout maps.

  For up to N batch entries, plots gt_map and pred_map side by side for each
  channel (at the trajectory midpoint time step) and writes the grid to
  readout_map_<global_step>.png in output_dir. `metric_summary` is accepted
  for eval-fn interface compatibility but unused here.
  """
  # outputs is [gt_map, pred_map]:
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)

  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4] * 2), (5, 5))
  # Pop axes in row-major order.
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    gt_map, pred_map = outputs[i]
    for j in [0]:
      for k in range(gt_map.shape[4]):
        # Display something like the midpoint of the trajectory.
        id = np.int(gt_map.shape[1] / 2)

        ax = axes.pop();
        ax.imshow(gt_map[j, id, :, :, k], origin='lower',
                  interpolation='none', vmin=0., vmax=1.)
        ax.set_axis_off();
        if i == 0: ax.set_title('gt_map')

        ax = axes.pop();
        ax.imshow(pred_map[j, id, :, :, k], origin='lower',
                  interpolation='none', vmin=0., vmax=1.)
        ax.set_axis_off();
        if i == 0: ax.set_title('pred_map')

  file_name = os.path.join(output_dir,
                           'readout_map_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
def _vis(outputs, global_step, output_dir, metric_summary, N):
  """Save a diagnostic figure of goal / occupancy / conf / value / incr map.

  For up to N batch entries, plots the five per-scale tensors channel by
  channel (at the trajectory midpoint time step) and writes the grid to
  value_vis_<global_step>.png in output_dir. `metric_summary` is accepted for
  eval-fn interface compatibility but unused here.
  """
  # Plot the value map, goal for various maps to see what if the model is
  # learning anything useful.
  #
  # outputs is [values, goals, maps, occupancy, conf].
  #
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)

  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4] * 5), (5, 5))
  # Pop axes in row-major order.
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    values, goals, maps, occupancy, conf = outputs[i]
    for j in [0]:
      for k in range(values.shape[4]):
        # Display something like the midpoint of the trajectory.
        id = np.int(values.shape[1] / 2)

        ax = axes.pop();
        ax.imshow(goals[j, id, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off();
        if i == 0: ax.set_title('goal')

        ax = axes.pop();
        ax.imshow(occupancy[j, id, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off();
        if i == 0: ax.set_title('occupancy')

        ax = axes.pop();
        ax.imshow(conf[j, id, :, :, k], origin='lower',
                  interpolation='none', vmin=0., vmax=1.)
        ax.set_axis_off();
        if i == 0: ax.set_title('conf')

        ax = axes.pop();
        ax.imshow(values[j, id, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off();
        if i == 0: ax.set_title('value')

        ax = axes.pop();
        ax.imshow(maps[j, id, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off();
        if i == 0: ax.set_title('incr map')

  file_name = os.path.join(output_dir,
                           'value_vis_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
def _summary_vis(m, batch_size, num_steps, arop_full_summary_iters):
  """Build the additional-result ops that feed the _vis figure.

  Reduces each per-scale value/occupancy/conf/goal/map tensor across its
  channel axis, concatenates across scales, and keeps only the midpoint time
  step to save memory. Returns (arop, arop_summary_iters, arop_eval_fns)
  where the eval fn is _vis.
  """
  arop = []; arop_summary_iters = []; arop_eval_fns = [];
  vis_value_ops = []; vis_goal_ops = []; vis_map_ops = [];
  vis_occupancy_ops = []; vis_conf_ops = [];
  for i, val_op in enumerate(m.value_ops):
    # Channel-reduced visualizations for scale i.
    vis_value_op = tf.reduce_mean(tf.abs(val_op), axis=3, keep_dims=True)
    vis_value_ops.append(vis_value_op)

    vis_occupancy_op = tf.reduce_mean(tf.abs(m.occupancys[i]), 3, True)
    vis_occupancy_ops.append(vis_occupancy_op)

    vis_conf_op = tf.reduce_max(tf.abs(m.confs[i]), axis=3, keep_dims=True)
    vis_conf_ops.append(vis_conf_op)

    ego_goal_imgs_i_op = m.input_tensors['step'][
        'ego_goal_imgs_{:d}'.format(i)]
    vis_goal_op = tf.reduce_max(ego_goal_imgs_i_op, 4, True)
    vis_goal_ops.append(vis_goal_op)

    vis_map_op = tf.reduce_mean(tf.abs(m.ego_map_ops[i]), 4, True)
    vis_map_ops.append(vis_map_op)

  # Stack the per-scale visualizations along a trailing axis.
  vis_goal_ops = tf.concat(vis_goal_ops, 4)
  vis_map_ops = tf.concat(vis_map_ops, 4)
  vis_value_ops = tf.concat(vis_value_ops, 3)
  vis_occupancy_ops = tf.concat(vis_occupancy_ops, 3)
  vis_conf_ops = tf.concat(vis_conf_ops, 3)

  # Restore an explicit (batch, time, ...) leading structure.
  sh = tf.unstack(tf.shape(vis_value_ops))[1:]
  vis_value_ops = tf.reshape(vis_value_ops, shape=[batch_size, -1] + sh)

  sh = tf.unstack(tf.shape(vis_conf_ops))[1:]
  vis_conf_ops = tf.reshape(vis_conf_ops, shape=[batch_size, -1] + sh)

  sh = tf.unstack(tf.shape(vis_occupancy_ops))[1:]
  vis_occupancy_ops = tf.reshape(vis_occupancy_ops,
                                 shape=[batch_size, -1] + sh)

  # Save memory, only return time steps that need to be visualized, factor of
  # 32 CPU memory saving.
  id = np.int(num_steps / 2)
  vis_goal_ops = tf.expand_dims(vis_goal_ops[:, id, :, :, :], axis=1)
  vis_map_ops = tf.expand_dims(vis_map_ops[:, id, :, :, :], axis=1)
  vis_value_ops = tf.expand_dims(vis_value_ops[:, id, :, :, :], axis=1)
  vis_conf_ops = tf.expand_dims(vis_conf_ops[:, id, :, :, :], axis=1)
  vis_occupancy_ops = tf.expand_dims(vis_occupancy_ops[:, id, :, :, :],
                                     axis=1)

  arop += [[vis_value_ops, vis_goal_ops, vis_map_ops, vis_occupancy_ops,
            vis_conf_ops]]
  arop_summary_iters += [arop_full_summary_iters]
  arop_eval_fns += [_vis]
  return arop, arop_summary_iters, arop_eval_fns
def _summary_readout_maps(m, num_steps, arop_full_summary_iters):
  """Builds additional-return ops for visualizing readout maps.

  Reshapes the predicted readout-map probabilities to match the ground-truth
  tensor and slices both at the last time step (num_steps - 1) so only the
  final maps are fetched and visualized.

  Args:
    m: model object exposing readout_maps_gt and readout_maps_probs
      (assumed TF1-style graph tensors -- not verified here).
    num_steps: number of unrolled time steps.
    arop_full_summary_iters: interval (in iterations) at which the full
      visual summary is produced.

  Returns:
    Tuple (arop, arop_summary_iters, arop_eval_fns) mirroring _summary_vis,
    with _vis_readout_maps as the evaluation callback.
  """
  arop = []
  arop_summary_iters = []
  arop_eval_fns = []
  # int() replaces np.int, which was removed in NumPy 1.24; last_step also
  # avoids shadowing the builtin id.
  last_step = int(num_steps - 1)
  vis_readout_maps_gt = m.readout_maps_gt
  # Give the predictions the same [batch, time, ...] shape as the ground
  # truth before slicing out the final step.
  vis_readout_maps_prob = tf.reshape(
      m.readout_maps_probs, shape=tf.shape(vis_readout_maps_gt))
  vis_readout_maps_gt = tf.expand_dims(
      vis_readout_maps_gt[:, last_step, :, :, :], 1)
  vis_readout_maps_prob = tf.expand_dims(
      vis_readout_maps_prob[:, last_step, :, :, :], 1)
  arop += [[vis_readout_maps_gt, vis_readout_maps_prob]]
  arop_summary_iters += [arop_full_summary_iters]
  arop_eval_fns += [_vis_readout_maps]
  return arop, arop_summary_iters, arop_eval_fns
def _add_summaries(m, args, summary_mode, arop_full_summary_iters):
  """Wires up the default scalar summaries for model `m` and, in 'val' mode,
  the heavier image visualizations.

  Returns the summary-ops container produced by nu.add_default_summaries,
  extended with visualization additional-return ops when applicable.
  """
  task_params = args.navtask.task_params

  # Scalars to log: learning rate, global step and gt-sampling probability,
  # followed by every loss and accuracy op. The parallel 0/1 list marks
  # which entries are aggregated across iterations.
  ops_to_log = [m.lr_op, m.global_step_op, m.sample_gt_prob_op]
  ops_to_log = ops_to_log + m.loss_ops + m.acc_ops
  names = ['lr', 'global_step', 'sample_gt_prob_op'] + m.loss_ops_names
  names = names + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))]
  aggregate_flags = [0] * 3 + [1] * (len(m.loss_ops_names) + len(m.acc_ops))

  scope_name = 'summary'
  with tf.name_scope(scope_name):
    s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters,
                                     ops_to_log, names, aggregate_flags,
                                     m.action_prob_op, m.input_tensors,
                                     scope_name=scope_name)

  if summary_mode == 'val':
    # Validation additionally renders the internal value / goal / map /
    # occupancy / confidence images.
    vis = _summary_vis(m, task_params.batch_size, task_params.num_steps,
                       arop_full_summary_iters)
    s_ops.additional_return_ops += vis[0]
    s_ops.arop_summary_iters += vis[1]
    s_ops.arop_eval_fns += vis[2]

    if args.arch.readout_maps:
      # Readout-map architectures also visualize gt vs. predicted maps.
      rm = _summary_readout_maps(m, task_params.num_steps,
                                 arop_full_summary_iters)
      s_ops.additional_return_ops += rm[0]
      s_ops.arop_summary_iters += rm[1]
      s_ops.arop_eval_fns += rm[2]

  return s_ops
Prev
1
…
7
8
9
10
11
12
13
14
15
…
17
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment