Unverified commit d5e826e3 authored by Steven Hickson, committed by GitHub

Merge branch 'master' into master

parents e1ac09e1 fc37f117
......@@ -4,6 +4,7 @@ from __future__ import print_function
"""Tasks that test correctness of algorithms."""
from six.moves import xrange
from common import reward as reward_lib # brain coder
from single_task import misc # brain coder
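Most hunks in this merge repeat one pattern: import `xrange` from `six.moves` so the same module runs under Python 2 and Python 3. A minimal standalone sketch of why (not code from this repository):

```python
# On Python 2, six.moves.xrange is the builtin xrange (lazy, constant memory);
# on Python 3, where xrange no longer exists, it resolves to the builtin range.
from six.moves import xrange


def sum_of_squares(n):
    """Sum i**2 for i in [0, n) without building a full list on Python 2."""
    total = 0
    for i in xrange(n):
        total += i * i
    return total


print(sum_of_squares(10))  # 285
```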
......@@ -124,5 +125,3 @@ class HillClimbingTask(object):
# closest next element.
# Maximum distance possible is num_actions * base / 2 = 3 * 8 / 2 = 12
return (len(prefix) + (1 - min_dist / 12.0)), False
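The comment above pins the maximum element distance at num_actions * base / 2 = 3 * 8 / 2 = 12, so the returned reward is the matched prefix length plus a fractional closeness bonus. A hedged sketch of that reward shape (the helper name is made up; the real HillClimbingTask has more context than this hunk shows):

```python
# Illustrative only; mirrors the return expression in the hunk above.
MAX_DIST = 3 * 8 / 2.0  # num_actions * base / 2 = 12, per the comment


def prefix_plus_closeness_reward(prefix_len, min_dist):
    """Whole points for the correct prefix, a fraction for near-misses.

    min_dist is the distance from the next emitted element to the closest
    correct one, so the fractional term hits 1.0 only on an exact match.
    """
    return prefix_len + (1.0 - min_dist / MAX_DIST)


print(prefix_plus_closeness_reward(4, 3))  # 4.75
```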
......@@ -39,6 +39,7 @@ from absl import app
from absl import flags
from absl import logging
import numpy as np
from six.moves import xrange
import tensorflow as tf
from single_task import defaults # brain coder
......
......@@ -30,8 +30,8 @@ from tensorflow.python.platform import flags
import datasets.nav_env_config as nec
import datasets.nav_env as nav_env
import cv2
from datasets import factory
import render.swiftshader_renderer as renderer
SwiftshaderRenderer = renderer.SwiftshaderRenderer
VisualNavigationEnv = nav_env.VisualNavigationEnv
......@@ -53,10 +53,10 @@ def get_args():
navtask.camera_param.width = sz
navtask.task_params.img_height = sz
navtask.task_params.img_width = sz
# navtask.task_params.semantic_task.class_map_names = ['chair', 'door', 'table']
# navtask.task_params.type = 'to_nearest_obj_acc'
logging.info('navtask: %s', navtask)
return navtask
......@@ -90,12 +90,12 @@ def walk_through(b):
root = tk.Tk()
image = b.render_nodes(b.task.nodes[[current_node],:])[0]
-print image.shape
+print(image.shape)
image = image.astype(np.uint8)
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
panel = tk.Label(root, image=im)
map_size = b.traversible.shape
sc = np.max(map_size)/256.
loc = np.array([[map_size[1]/2., map_size[0]/2.]])
......@@ -128,15 +128,15 @@ def walk_through(b):
global current_node
current_node = b.take_action([current_node], [3], 1)[0][0]
refresh()
def right_key(event):
global current_node
current_node = b.take_action([current_node], [1], 1)[0][0]
refresh()
def quit(event):
root.destroy()
panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1,
sticky=tk.W+tk.E+tk.N+tk.S)
panel.bind('<Left>', left_key)
......@@ -150,19 +150,19 @@ def walk_through(b):
def simple_window():
root = tk.Tk()
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 0] = 255
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 1] = 255
im2 = Image.fromarray(image)
im2 = ImageTk.PhotoImage(im2)
panel = tk.Label(root, image=im)
def left_key(event):
panel.configure(image=im2)
panel.image = im2
......@@ -176,7 +176,7 @@ def simple_window():
panel.bind('q', quit)
panel.focus_set()
panel.pack(side = "bottom", fill = "both", expand = "yes")
root.mainloop()
def main(_):
b = load_building(FLAGS.dataset_name, FLAGS.building_name)
......
......@@ -17,7 +17,7 @@ r"""
Code for plotting trajectories in the top view, and also plot first person views
from saved trajectories. Does not run the network but only loads the mesh data
to plot the view points.
CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \
--first_person --num_steps 40 \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \
......@@ -36,13 +36,13 @@ from tensorflow.contrib import slim
import cv2
import logging
from tensorflow.python.platform import gfile
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from datasets import nav_env
import scripts.script_nav_agent_release as sna
import src.file_utils as fu
from src import graph_utils
from src import utils
FLAGS = flags.FLAGS
......@@ -95,7 +95,7 @@ def _compute_hardness():
# Initialize the agent.
init_env_state = e.reset(rng_data)
gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s]
for j, s in enumerate(e.episode.start_node_ids)]
for j in range(args.navtask.task_params.batch_size):
......@@ -120,15 +120,15 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
FLAGS.imset)
fu.makedirs(out_dir)
# Load the model so that we can render.
plt.set_cmap('gray')
samples_per_action = 8; wait_at_action = 0;
Writer = animation.writers['mencoder']
writer = Writer(fps=3*(samples_per_action+wait_at_action),
metadata=dict(artist='anonymous'), bitrate=1800)
args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset)
args.navtask.logdir = None
navtask_ = copy.deepcopy(args.navtask)
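This hunk builds a matplotlib movie writer whose frame rate is tied to `samples_per_action` and `wait_at_action`. A self-contained sketch of the same pattern, using the more widely available `ffmpeg` backend instead of `mencoder` (the file name and frame contents are invented):

```python
# Hedged sketch of the animation-writer setup above, not the repo's code.
import numpy as np
import matplotlib
matplotlib.use('Agg')  # render off-screen
import matplotlib.pyplot as plt
from matplotlib import animation

samples_per_action = 8
wait_at_action = 0
Writer = animation.writers['ffmpeg']  # the script above asks for 'mencoder'
writer = Writer(fps=3 * (samples_per_action + wait_at_action),
                metadata=dict(artist='anonymous'), bitrate=1800)

fig, ax = plt.subplots()
im = ax.imshow(np.zeros((64, 64)), vmin=0.0, vmax=1.0, cmap='gray')


def worker(frame):
    im.set_data(np.random.RandomState(frame).rand(64, 64))
    return im,


anim = animation.FuncAnimation(fig, worker, frames=24, blit=True)
anim.save('tmp.mp4', writer=writer, savefig_kwargs={'facecolor': 'black'})
```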
......@@ -142,10 +142,10 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
R = lambda: nav_env.get_multiplexer_class(navtask_, 0)
R = R()
b = R.buildings[0]
f = [0 for _ in range(wait_at_action)] + \
[float(_)/samples_per_action for _ in range(samples_per_action)];
# Generate things for it to render.
inds_to_do = []
inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390]
......@@ -163,7 +163,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
# axes = [ax]
for ax in axes:
ax.set_axis_off()
node_ids = dt['all_node_ids'][i, :, 0]*1
# Prune so that last node is not repeated more than 3 times?
if np.all(node_ids[-4:] == node_ids[-1]):
......@@ -185,7 +185,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
node_ids_all = np.reshape(node_ids_all[:-1,:], -1)
perturbs_all = np.reshape(perturbs_all, [-1, 4])
imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all)
# Get action at each node.
actions = []
_, action_to_nodes = b.get_feasible_actions(node_ids)
......@@ -193,7 +193,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
action_to_node = action_to_nodes[j]
node_to_action = dict(zip(action_to_node.values(), action_to_node.keys()))
actions.append(node_to_action[node_ids[j+1]])
def init_fn():
return fig,
gt_dist_to_goal = []
......@@ -205,8 +205,8 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off();
img = img.astype(np.uint8); ax.imshow(img);
tt = ax.set_title(
"First Person View\n" +
"Top corners show diagnostics (distance, agents' action) not input to agent.",
"First Person View\n" +
"Top corners show diagnostics (distance, agents' action) not input to agent.",
fontsize=12)
plt.setp(tt, color='white')
......@@ -218,9 +218,9 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
fontsize=20, color='red',
transform=ax.transAxes, alpha=1.0)
t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))
# Action to take.
-action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', '$\Uparrow$ ']
+action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', r'$\Uparrow$ ']
t = ax.text(0.99, 0.99, action_latex[actions[step_number]],
horizontalalignment='right',
verticalalignment='top',
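The only change in this hunk is the `r''` prefix on the last marker: in a Python 3 string literal, `\U` starts an 8-digit unicode escape, so the un-prefixed `'$\Uparrow$ '` fails to compile, while the raw string keeps the backslash for matplotlib's mathtext. A quick standalone illustration:

```python
# s = '$\Uparrow$ '   # SyntaxError on Python 3: 'unicodeescape' codec error
up_arrow = r'$\Uparrow$ '   # raw string keeps the backslash literal
circled = '$\\odot$ '       # doubling the backslash also works
print(up_arrow, circled)    # $\Uparrow$  $\odot$
```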
......@@ -256,7 +256,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
locs = np.expand_dims(locs, axis=0)
ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4)
tt = ax.set_title('Trajectory in topview', fontsize=14)
plt.setp(tt, color='white')
return fig,
line_ani = animation.FuncAnimation(fig, worker,
......@@ -265,7 +265,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
tmp_file_name = 'tmp.mp4'
line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'})
out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i))
-print out_file_name
+print(out_file_name)
if fu.exists(out_file_name):
gfile.Remove(out_file_name)
......@@ -280,12 +280,12 @@ def plot_trajectory(dt, hardness, orig_maps, out_dir):
out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')
dt['hardness'] = hardness
utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True)
#Plot trajectories onto the maps
plt.set_cmap('gray')
for i in range(4000):
goal_loc = dt['all_goal_locs'][i, :, :]
locs = np.concatenate((dt['all_locs'][i,:,:],
dt['all_locs'][i,:,:]), axis=0)
xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
......@@ -305,35 +305,35 @@ def plot_trajectory(dt, hardness, orig_maps, out_dir):
uniq = np.array(uniq)
all_locs = all_locs[uniq, :]
ax.plot(dt['all_locs'][i, 0, 0],
dt['all_locs'][i, 0, 1], 'b.', markersize=24)
ax.plot(dt['all_goal_locs'][i, 0, 0],
dt['all_goal_locs'][i, 0, 1], 'g*', markersize=19)
ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)
ax.scatter(all_locs[:,0], all_locs[:,1],
c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
cmap='Reds', s=30, linewidth=0)
ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')
ax.set_xlim([xy1[0], xy2[0]])
ax.set_ylim([xy1[1], xy2[1]])
file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))
-print file_name
-with fu.fopen(file_name, 'w') as f:
+print(file_name)
+with fu.fopen(file_name, 'w') as f:
plt.savefig(f)
plt.close(fig)
def main(_):
a = _load_trajectory()
h_dists, gt_dists, orig_maps = _compute_hardness()
hardness = 1.-h_dists*1./ gt_dists
if FLAGS.top_view:
plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir)
if FLAGS.first_person:
plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir)
if __name__ == '__main__':
app.run()
......@@ -16,6 +16,7 @@
"""Utilities for manipulating files.
"""
import os
import numpy as np
import PIL
from tensorflow.python.platform import gfile
import cv2
......
......@@ -19,6 +19,7 @@ import skimage.morphology
import numpy as np
import networkx as nx
import itertools
import logging
import graph_tool as gt
import graph_tool.topology
import graph_tool.generation
......
......@@ -17,6 +17,7 @@
"""
import copy
import skimage.morphology
import logging
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
......
......@@ -17,6 +17,7 @@ r"""Generaly Utilities.
"""
import numpy as np, cPickle, os, time
from six.moves import xrange
import src.file_utils as fu
import logging
......@@ -93,12 +94,12 @@ def tic_toc_print(interval, string):
global tic_toc_print_time_old
if 'tic_toc_print_time_old' not in globals():
tic_toc_print_time_old = time.time()
-print string
+print(string)
else:
new_time = time.time()
if new_time - tic_toc_print_time_old > interval:
tic_toc_print_time_old = new_time;
-print string
+print(string)
def mkdir_if_missing(output_dir):
if not fu.exists(output_dir):
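`tic_toc_print` above is a throttle: it emits its message only when at least `interval` seconds have passed since the last emission. A standalone sketch of the same pattern using only the standard library (names are hypothetical):

```python
# Hedged, self-contained version of the throttling idea in tic_toc_print.
import time

_last_emit = None


def throttled_print(interval, message):
    """Print `message` at most once every `interval` seconds."""
    global _last_emit
    now = time.time()
    if _last_emit is None or now - _last_emit > interval:
        _last_emit = now
        print(message)


for step in range(20):
    throttled_print(1.0, 'heartbeat at step {}'.format(step))
    time.sleep(0.2)  # prints roughly once per second, not every step
```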
......@@ -126,7 +127,7 @@ def load_variables(pickle_file_name):
def voc_ap(rec, prec):
rec = rec.reshape((-1,1))
prec = prec.reshape((-1,1))
z = np.zeros((1,1))
o = np.ones((1,1))
mrec = np.vstack((z, rec, o))
mpre = np.vstack((z, prec, z))
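This hunk only touches the sentinel padding, but those padded `mrec`/`mpre` arrays are the setup for the standard PASCAL-VOC average-precision integration. For context, a sketch of that textbook recipe (the repo's `voc_ap` may differ in detail):

```python
# Hedged sketch of the classic VOC AP integration the padding above feeds.
import numpy as np


def voc_ap_sketch(rec, prec):
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    # Make precision monotonically non-increasing from right to left.
    for i in range(mpre.size - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    # Integrate precision over the points where recall changes.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return float(np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1]))


print(voc_ap_sketch(np.array([0.5, 1.0]), np.array([1.0, 0.5])))  # 0.75
```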
......@@ -165,4 +166,3 @@ def calc_pr(gt, out, wt=None):
ap = voc_ap(rec, prec)
return ap, rec, prec
......@@ -58,7 +58,7 @@ def main(_):
#iteration = FLAGS.iteration
if not tf.gfile.Exists(FLAGS.input_codes):
-print '\nInput codes not found.\n'
+print('\nInput codes not found.\n')
return
with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
......
......@@ -171,7 +171,7 @@ def train():
'code_length': model.average_code_length
}
np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)
-print np_tensors['code_length']
+print(np_tensors['code_length'])
sv.Stop()
......
......@@ -18,6 +18,7 @@
import os
import numpy as np
from six.moves import xrange
import tensorflow as tf
import synthetic_model
......
......@@ -16,6 +16,7 @@
"""Binary code sample generator."""
import numpy as np
from six.moves import xrange
_CRC_LINE = [
......
......@@ -21,6 +21,7 @@ from __future__ import unicode_literals
import math
import numpy as np
import six
import tensorflow as tf
......@@ -39,7 +40,7 @@ class RsqrtInitializer(object):
1.0 / sqrt(product(shape[dims]))
**kwargs: Extra keyword arguments to pass to tf.truncated_normal.
"""
-if isinstance(dims, (int, long)):
+if isinstance(dims, six.integer_types):
self._dims = [dims]
else:
self._dims = dims
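Both initializer hunks make the same substitution: `long` does not exist on Python 3, so the portable scalar check is `six.integer_types` ((int, long) on Python 2, (int,) on Python 3). A small standalone illustration of the normalization the constructor performs:

```python
# Illustrative only; mirrors the dims handling shown in the hunk above.
import six


def as_dim_list(dims):
    """Normalize a scalar axis index or a sequence of them to a list."""
    if isinstance(dims, six.integer_types):
        return [dims]
    return list(dims)


print(as_dim_list(2))       # [2]
print(as_dim_list((0, 1)))  # [0, 1]
```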
......@@ -73,7 +74,7 @@ class RectifierInitializer(object):
sqrt(scale / product(shape[dims])).
**kwargs: Extra keyword arguments to pass to tf.truncated_normal.
"""
-if isinstance(dims, (int, long)):
+if isinstance(dims, six.integer_types):
self._dims = [dims]
else:
self._dims = dims
......
......@@ -16,6 +16,7 @@
"""Define some typical masked 2D convolutions."""
import numpy as np
from six.moves import xrange
import tensorflow as tf
import block_util
......
......@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
import blocks_masked_conv2d
......
......@@ -22,6 +22,7 @@ import math
import os
import numpy as np
from six.moves import xrange
import tensorflow as tf
import blocks_std
......
......@@ -25,6 +25,7 @@ from __future__ import print_function
from delf import feature_pb2
from delf import datum_io
import numpy as np
from six.moves import xrange
import tensorflow as tf
......
......@@ -22,6 +22,7 @@ import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
......
......@@ -17,6 +17,7 @@
import collections
from six.moves import xrange
import tensorflow as tf
OrderedDict = collections.OrderedDict
......
......@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def labels_from_probs(probs):
......@@ -127,5 +128,3 @@ def aggregation_most_frequent(logits):
result[i] = np.argmax(label_counts)
return np.asarray(result, dtype=np.int32)
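The tail of `aggregation_most_frequent` above returns, for every sample, the label that the most teacher models voted for. A hedged sketch of that aggregation, assuming `probs` has shape (num_teachers, num_samples, num_classes); the repo's aggregation module is the authoritative version:

```python
# Hedged sketch of most-frequent-label aggregation across a teacher ensemble.
import numpy as np


def most_frequent_labels(probs):
    """probs: float array of shape (num_teachers, num_samples, num_classes)."""
    votes = np.argmax(probs, axis=2)          # (num_teachers, num_samples)
    num_classes = probs.shape[2]
    result = np.zeros(votes.shape[1], dtype=np.int32)
    for i in range(votes.shape[1]):
        counts = np.bincount(votes[:, i], minlength=num_classes)
        result[i] = np.argmax(counts)
    return result


# Three teachers, two samples, three classes; majority vote decides each sample.
probs = np.array([[[0.8, 0.1, 0.1], [0.2, 0.7, 0.1]],
                  [[0.1, 0.8, 0.1], [0.2, 0.7, 0.1]],
                  [[0.7, 0.2, 0.1], [0.1, 0.1, 0.8]]])
print(most_frequent_labels(probs))  # [0 1]
```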