Commit ae0a9409 authored by cclauss

Fix Python 3 Syntax Errors (en masse)

parent eb7c6e43
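Nearly every hunk below applies one of a handful of mechanical Python 2-to-3 fixes: print statements become print() calls, ur'...' raw-unicode literals drop the u prefix, the exec statement becomes the exec() function, xrange is imported from six.moves, one function signature loses its tuple parameters, and from __future__ import print_function is added to files that must keep running under Python 2 as well. A small illustrative snippet (hypothetical code, not lines from this commit) that exercises the recurring patterns and runs under both interpreters:

    from __future__ import print_function  # give 2.x the 3.x print semantics
    from six.moves import xrange           # range on Python 3, xrange on Python 2

    pattern = r'[\u2E00-\uFFFF]'       # ur'' literals are a SyntaxError on Python 3
    code_str = "out = sum(xrange(10))"
    namespace = {"xrange": xrange}
    exec(code_str, namespace)          # exec as a function parses on 2.x and 3.x
    print("total:", namespace["out"])  # -> total: 45

One caveat with a conversion at this scale: in a file without the __future__ import, print(a, b) still parses as a Python 2 print statement applied to a tuple and prints ('a', 'b'); a few Python 2-only idioms (cPickle, dict.iteritems()) also survive as unchanged context lines in the hunks below.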
@@ -30,8 +30,8 @@ from tensorflow.python.platform import flags
import datasets.nav_env_config as nec
import datasets.nav_env as nav_env
import cv2
-from datasets import factory
-import render.swiftshader_renderer as renderer
+from datasets import factory
+import render.swiftshader_renderer as renderer
SwiftshaderRenderer = renderer.SwiftshaderRenderer
VisualNavigationEnv = nav_env.VisualNavigationEnv
@@ -53,10 +53,10 @@ def get_args():
navtask.camera_param.width = sz
navtask.task_params.img_height = sz
navtask.task_params.img_width = sz
# navtask.task_params.semantic_task.class_map_names = ['chair', 'door', 'table']
# navtask.task_params.type = 'to_nearest_obj_acc'
logging.info('navtask: %s', navtask)
return navtask
@@ -90,12 +90,12 @@ def walk_through(b):
root = tk.Tk()
image = b.render_nodes(b.task.nodes[[current_node],:])[0]
-print image.shape
+print(image.shape)
image = image.astype(np.uint8)
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
panel = tk.Label(root, image=im)
map_size = b.traversible.shape
sc = np.max(map_size)/256.
loc = np.array([[map_size[1]/2., map_size[0]/2.]])
@@ -128,15 +128,15 @@ def walk_through(b):
global current_node
current_node = b.take_action([current_node], [3], 1)[0][0]
refresh()
def right_key(event):
global current_node
current_node = b.take_action([current_node], [1], 1)[0][0]
refresh()
def quit(event):
-root.destroy()
+root.destroy()
panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1,
sticky=tk.W+tk.E+tk.N+tk.S)
panel.bind('<Left>', left_key)
@@ -150,19 +150,19 @@ def walk_through(b):
def simple_window():
root = tk.Tk()
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 0] = 255
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 1] = 255
im2 = Image.fromarray(image)
im2 = ImageTk.PhotoImage(im2)
panel = tk.Label(root, image=im)
def left_key(event):
panel.configure(image=im2)
panel.image = im2
......@@ -176,7 +176,7 @@ def simple_window():
panel.bind('q', quit)
panel.focus_set()
panel.pack(side = "bottom", fill = "both", expand = "yes")
-root.mainloop()
+root.mainloop()
def main(_):
b = load_building(FLAGS.dataset_name, FLAGS.building_name)
@@ -17,7 +17,7 @@ r"""
Code for plotting trajectories in the top view, and also plot first person views
from saved trajectories. Does not run the network but only loads the mesh data
to plot the view points.
-CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
+CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \
--first_person --num_steps 40 \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \
@@ -36,13 +36,13 @@ from tensorflow.contrib import slim
import cv2
import logging
from tensorflow.python.platform import gfile
-from tensorflow.python.platform import app
-from tensorflow.python.platform import flags
+from tensorflow.python.platform import app
+from tensorflow.python.platform import flags
from datasets import nav_env
-import scripts.script_nav_agent_release as sna
+import scripts.script_nav_agent_release as sna
import src.file_utils as fu
-from src import graph_utils
+from src import graph_utils
from src import utils
FLAGS = flags.FLAGS
@@ -95,7 +95,7 @@ def _compute_hardness():
# Initialize the agent.
init_env_state = e.reset(rng_data)
-gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s]
+gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s]
for j, s in enumerate(e.episode.start_node_ids)]
for j in range(args.navtask.task_params.batch_size):
@@ -120,15 +120,15 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
FLAGS.imset)
fu.makedirs(out_dir)
# Load the model so that we can render.
plt.set_cmap('gray')
samples_per_action = 8; wait_at_action = 0;
Writer = animation.writers['mencoder']
-writer = Writer(fps=3*(samples_per_action+wait_at_action),
+writer = Writer(fps=3*(samples_per_action+wait_at_action),
metadata=dict(artist='anonymous'), bitrate=1800)
args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset)
args.navtask.logdir = None
navtask_ = copy.deepcopy(args.navtask)
@@ -142,10 +142,10 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
R = lambda: nav_env.get_multiplexer_class(navtask_, 0)
R = R()
b = R.buildings[0]
f = [0 for _ in range(wait_at_action)] + \
[float(_)/samples_per_action for _ in range(samples_per_action)];
# Generate things for it to render.
inds_to_do = []
inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390]
@@ -163,7 +163,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
# axes = [ax]
for ax in axes:
ax.set_axis_off()
node_ids = dt['all_node_ids'][i, :, 0]*1
# Prune so that last node is not repeated more than 3 times?
if np.all(node_ids[-4:] == node_ids[-1]):
@@ -185,7 +185,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
node_ids_all = np.reshape(node_ids_all[:-1,:], -1)
perturbs_all = np.reshape(perturbs_all, [-1, 4])
imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all)
# Get action at each node.
actions = []
_, action_to_nodes = b.get_feasible_actions(node_ids)
@@ -193,7 +193,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
action_to_node = action_to_nodes[j]
node_to_action = dict(zip(action_to_node.values(), action_to_node.keys()))
actions.append(node_to_action[node_ids[j+1]])
def init_fn():
return fig,
gt_dist_to_goal = []
@@ -205,8 +205,8 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off();
img = img.astype(np.uint8); ax.imshow(img);
tt = ax.set_title(
-"First Person View\n" +
-"Top corners show diagnostics (distance, agents' action) not input to agent.",
+"First Person View\n" +
+"Top corners show diagnostics (distance, agents' action) not input to agent.",
fontsize=12)
plt.setp(tt, color='white')
@@ -218,7 +218,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
fontsize=20, color='red',
transform=ax.transAxes, alpha=1.0)
t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))
# Action to take.
action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', '$\Uparrow$ ']
t = ax.text(0.99, 0.99, action_latex[actions[step_number]],
@@ -256,7 +256,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
locs = np.expand_dims(locs, axis=0)
ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4)
tt = ax.set_title('Trajectory in topview', fontsize=14)
-plt.setp(tt, color='white')
+plt.setp(tt, color='white')
return fig,
line_ani = animation.FuncAnimation(fig, worker,
@@ -265,7 +265,7 @@ def plot_trajectory_first_person(dt, orig_maps, out_dir):
tmp_file_name = 'tmp.mp4'
line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'})
out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i))
-print out_file_name
+print(out_file_name)
if fu.exists(out_file_name):
gfile.Remove(out_file_name)
@@ -280,12 +280,12 @@ def plot_trajectory(dt, hardness, orig_maps, out_dir):
out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')
dt['hardness'] = hardness
utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True)
#Plot trajectories onto the maps
plt.set_cmap('gray')
for i in range(4000):
goal_loc = dt['all_goal_locs'][i, :, :]
-locs = np.concatenate((dt['all_locs'][i,:,:],
+locs = np.concatenate((dt['all_locs'][i,:,:],
dt['all_locs'][i,:,:]), axis=0)
xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
@@ -305,35 +305,35 @@ def plot_trajectory(dt, hardness, orig_maps, out_dir):
uniq = np.array(uniq)
all_locs = all_locs[uniq, :]
-ax.plot(dt['all_locs'][i, 0, 0],
+ax.plot(dt['all_locs'][i, 0, 0],
dt['all_locs'][i, 0, 1], 'b.', markersize=24)
-ax.plot(dt['all_goal_locs'][i, 0, 0],
+ax.plot(dt['all_goal_locs'][i, 0, 0],
dt['all_goal_locs'][i, 0, 1], 'g*', markersize=19)
ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)
ax.scatter(all_locs[:,0], all_locs[:,1],
-c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
+c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
cmap='Reds', s=30, linewidth=0)
ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')
ax.set_xlim([xy1[0], xy2[0]])
ax.set_ylim([xy1[1], xy2[1]])
file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))
-print file_name
-with fu.fopen(file_name, 'w') as f:
+print(file_name)
+with fu.fopen(file_name, 'w') as f:
plt.savefig(f)
plt.close(fig)
def main(_):
a = _load_trajectory()
h_dists, gt_dists, orig_maps = _compute_hardness()
hardness = 1.-h_dists*1./ gt_dists
if FLAGS.top_view:
plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir)
if FLAGS.first_person:
plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir)
if __name__ == '__main__':
app.run()
@@ -17,6 +17,7 @@ r"""General Utilities.
"""
import numpy as np, cPickle, os, time
+from six.moves import xrange
import src.file_utils as fu
import logging
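The from six.moves import xrange added above (and to several files below) resolves to Python 3's range while keeping Python 2's name; six.moves exists precisely for such renamed builtins and relocated modules. The cPickle import on the unchanged line above is the same class of problem, left untouched by this commit; six.moves.cPickle is the usual shim. A minimal sketch of the idiom:

    from six.moves import cPickle, xrange  # pickle/range on Python 3

    total = 0
    for i in xrange(5):
        total += i
    print(total)  # -> 10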
@@ -93,12 +94,12 @@ def tic_toc_print(interval, string):
global tic_toc_print_time_old
if 'tic_toc_print_time_old' not in globals():
tic_toc_print_time_old = time.time()
-print string
+print(string)
else:
new_time = time.time()
if new_time - tic_toc_print_time_old > interval:
tic_toc_print_time_old = new_time;
-print string
+print(string)
def mkdir_if_missing(output_dir):
if not fu.exists(output_dir):
@@ -126,7 +127,7 @@ def load_variables(pickle_file_name):
def voc_ap(rec, prec):
rec = rec.reshape((-1,1))
prec = prec.reshape((-1,1))
-z = np.zeros((1,1))
+z = np.zeros((1,1))
o = np.ones((1,1))
mrec = np.vstack((z, rec, o))
mpre = np.vstack((z, prec, z))
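Only the padding step of voc_ap is visible in this hunk: recall is padded to run from 0 to 1, and precision is padded with zeros at both ends. The part not shown typically computes the precision envelope (a running maximum from the right) and sums precision over the recall increments; a standard standalone version of the whole computation, for reference (a sketch, not this file's exact code):

    import numpy as np

    def voc_ap_reference(rec, prec):
        mrec = np.concatenate(([0.0], rec, [1.0]))   # pad recall with 0 and 1
        mpre = np.concatenate(([0.0], prec, [0.0]))  # pad precision with zeros
        for i in range(len(mpre) - 2, -1, -1):       # precision envelope
            mpre[i] = max(mpre[i], mpre[i + 1])
        idx = np.where(mrec[1:] != mrec[:-1])[0]     # recall change points
        return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])

    print(voc_ap_reference(np.array([0.5, 1.0]), np.array([1.0, 0.5])))  # -> 0.75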
@@ -165,4 +166,3 @@ def calc_pr(gt, out, wt=None):
ap = voc_ap(rec, prec)
return ap, rec, prec
@@ -58,7 +58,7 @@ def main(_):
#iteration = FLAGS.iteration
if not tf.gfile.Exists(FLAGS.input_codes):
-print '\nInput codes not found.\n'
+print('\nInput codes not found.\n')
return
with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
@@ -171,7 +171,7 @@ def train():
'code_length': model.average_code_length
}
np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)
-print np_tensors['code_length']
+print(np_tensors['code_length'])
sv.Stop()
@@ -41,6 +41,7 @@ python analysis.py
import os
import math
import numpy as np
+from six.moves import xrange
import tensorflow as tf
from differential_privacy.multiple_teachers.input import maybe_download
@@ -139,7 +140,7 @@ def logmgf_exact(q, priv_eps, l):
try:
log_t = math.log(t)
except ValueError:
-print "Got ValueError in math.log for values :" + str((q, priv_eps, l, t))
+print("Got ValueError in math.log for values :" + str((q, priv_eps, l, t)))
log_t = priv_eps * l
else:
log_t = priv_eps * l
@@ -171,7 +172,7 @@ def sens_at_k(counts, noise_eps, l, k):
"""
counts_sorted = sorted(counts, reverse=True)
if 0.5 * noise_eps * l > 1:
-print "l too large to compute sensitivity"
+print("l too large to compute sensitivity")
return 0
# Now we can assume that at k, gap remains positive
# or we have reached the point where logmgf_exact is
@@ -268,8 +269,8 @@ def main(unused_argv):
# Solving gives eps = (alpha - ln (delta))/l
eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list
-print "Epsilons (Noisy Max): " + str(eps_list_nm)
-print "Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list)
+print("Epsilons (Noisy Max): " + str(eps_list_nm))
+print("Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list))
# If beta < eps / 2 ln (1/delta), then adding noise Lap(1) * 2 SS/eps
# is eps,delta DP
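The epsilon arithmetic above is the standard moments-accountant inversion: the accountant guarantees delta <= exp(alpha(l) - l * eps) for every moment order l, so fixing delta and solving exp(alpha(l) - l * eps) = delta for eps gives eps = (alpha(l) - ln(delta)) / l. That is exactly eps_list_nm, with total_log_mgf_nm playing the role of alpha(l) and l_list supplying the candidate orders; the code then reports the minimum over l.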
@@ -280,12 +281,12 @@ def main(unused_argv):
# Print the first one's scale
ss_eps = 2.0 * beta * math.log(1/delta)
ss_scale = 2.0 / ss_eps
-print "To get an " + str(ss_eps) + "-DP estimate of epsilon, "
-print "..add noise ~ " + str(ss_scale)
-print "... times " + str(total_ss_nm / l_list)
-print "Epsilon = " + str(min(eps_list_nm)) + "."
+print("To get an " + str(ss_eps) + "-DP estimate of epsilon, ")
+print("..add noise ~ " + str(ss_scale))
+print("... times " + str(total_ss_nm / l_list))
+print("Epsilon = " + str(min(eps_list_nm)) + ".")
if min(eps_list_nm) == eps_list_nm[-1]:
-print "Warning: May not have used enough values of l"
+print("Warning: May not have used enough values of l")
# Data independent bound, as mechanism is
# 2*noise_eps DP.
@@ -294,7 +295,7 @@ def main(unused_argv):
[logmgf_exact(1.0, 2.0 * noise_eps, l) for l in l_list])
data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list
-print "Data independent bound = " + str(min(data_ind_eps_list)) + "."
+print("Data independent bound = " + str(min(data_ind_eps_list)) + ".")
return
@@ -40,12 +40,15 @@ To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
+from __future__ import print_function
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.stats
+from six.moves import xrange
from sympy.mpmath import mp
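A sketch of the verification run the docstring above describes; the signature of compute_log_moment is assumed from this file's public API (the argument order and the verify flag are assumptions), and q, sigma, and T are arbitrary example values:

    import gaussian_moments as gm

    q, sigma, T = 0.01, 4.0, 10000  # sampling ratio, noise scale, steps
    for lmbd in range(1, 33):
        # verify=True cross-checks the numerical integrals (the I1 >= I2
        # condition mentioned above) against the closed-form computation.
        log_moment = gm.compute_log_moment(q, sigma, T, lmbd, verify=True)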
@@ -108,10 +111,10 @@ def compute_a(sigma, q, lmbd, verbose=False):
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
-print "A: by binomial expansion {} = {} + {}".format(
+print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
-q * a_lambda_second_term_exact)
+q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
@@ -125,8 +128,8 @@ def compute_b(sigma, q, lmbd, verbose=False):
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
-print "M =", m
-print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
+print("M =", m)
+print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
@@ -140,9 +143,9 @@ def compute_b(sigma, q, lmbd, verbose=False):
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
-print "B: by numerical integration", b_lambda
-print "B must be no more than ", b_bound
-print b_lambda, b_bound
+print("B: by numerical integration", b_lambda)
+print("B must be no more than ", b_bound)
+print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
@@ -188,10 +191,10 @@ def compute_a_mp(sigma, q, lmbd, verbose=False):
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
-print "A: by numerical integration {} = {} + {}".format(
+print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
-q * a_lambda_second_term)
+q * a_lambda_second_term))
return _to_np_float64(a_lambda)
@@ -210,8 +213,8 @@ def compute_b_mp(sigma, q, lmbd, verbose=False):
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
-print "M =", m
-print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
+print("M =", m)
+print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
@@ -223,8 +226,8 @@ def compute_b_mp(sigma, q, lmbd, verbose=False):
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
-print "B by numerical integration", b_lambda
-print "B must be no more than ", b_bound
+print("B by numerical integration", b_lambda)
+print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
@@ -17,6 +17,7 @@
import time
import numpy as np
+from six.moves import xrange
import tensorflow as tf
from tensorflow.python.framework import function
@@ -500,8 +501,10 @@ class NeuralGPU(object):
return tf.reduce_sum(encoder_outputs * tf.expand_dims(mask, 2), 1)
with tf.variable_scope("decoder"):
-def decoder_loop_fn((state, prev_cell_out, _), (cell_inp, cur_tgt)):
+def decoder_loop_fn(state__prev_cell_out__unused, cell_inp__cur_tgt):
"""Decoder loop function."""
+state, prev_cell_out, _ = state__prev_cell_out__unused
+cell_inp, cur_tgt = cell_inp__cur_tgt
attn_q = tf.layers.dense(prev_cell_out, height * nmaps,
name="attn_query")
attn_res = attention_query(attn_q, tf.get_variable(
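The decoder_loop_fn hunk above is the one non-mechanical rewrite in this commit: Python 3 removed tuple parameters from function signatures (PEP 3113), so each tuple now arrives as a single argument and is unpacked in the body. A standalone illustration of the pattern, with hypothetical names:

    # Python 2 only (SyntaxError on Python 3):
    #   def step((state, out), inp):
    #       return state, out, inp

    def step(state_out, inp):
        state, out = state_out  # unpack what used to be a tuple parameter
        return state, out, inp

    print(step((1, 2), 3))  # -> (1, 2, 3)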
@@ -22,6 +22,7 @@ import threading
import time
import numpy as np
+from six.moves import xrange
import tensorflow as tf
import program_utils
@@ -144,7 +145,7 @@ def read_data(source_path, target_path, buckets, max_size=None, print_out=True):
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0 and print_out:
-print " reading data line %d" % counter
+print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
@@ -188,7 +189,7 @@ def read_data_into_global(source_path, target_path, buckets,
global_train_set["wmt"].append(data_set)
train_total_size = calculate_buckets_scale(data_set, buckets, "wmt")
if print_out:
-print " Finished global data reading (%d)." % train_total_size
+print(" Finished global data reading (%d)." % train_total_size)
def initialize(sess=None):
@@ -552,7 +553,7 @@ def score_beams_prog(beams, target, inp, history, print_out=False,
for h in history]
tgt_set = set(target)
if print_out:
-print "target: ", tgt_prog
+print("target: ", tgt_prog)
inps, tgt_outs = [], []
for i in xrange(3):
ilist = [inp[i + 1, l] for l in xrange(inp.shape[1])]
@@ -566,11 +567,11 @@ def score_beams_prog(beams, target, inp, history, print_out=False,
if len(olist) == 1:
tgt_outs.append(olist[0])
else:
-print [program_utils.prog_vocab[x] for x in ilist if x > 0]
-print olist
-print tgt_prog
-print program_utils.evaluate(tgt_prog, {"a": inps[-1]})
-print "AAAAA"
+print([program_utils.prog_vocab[x] for x in ilist if x > 0])
+print(olist)
+print(tgt_prog)
+print(program_utils.evaluate(tgt_prog, {"a": inps[-1]}))
+print("AAAAA")
tgt_outs.append(olist[0])
if not test_mode:
for _ in xrange(7):
@@ -602,7 +603,7 @@ def score_beams_prog(beams, target, inp, history, print_out=False,
best_prog = b_prog
best_score = score
if print_out:
-print "best score: ", best_score, " best prog: ", best_prog
+print("best score: ", best_score, " best prog: ", best_prog)
return best, best_score
@@ -719,7 +720,7 @@ def train():
inp = new_inp
# If all results are great, stop (todo: not to wait for all?).
if FLAGS.nprint > 1:
-print scores
+print(scores)
if sum(scores) / float(len(scores)) >= 10.0:
break
# The final step with the true target.
@@ -735,7 +736,7 @@ def train():
errors, total, seq_err = data.accuracy(
inp, res, target, batch_size, 0, new_target, scores)
if FLAGS.nprint > 1:
-print "seq_err: ", seq_err
+print("seq_err: ", seq_err)
acc_total += total
acc_errors += errors
acc_seq_err += seq_err
@@ -944,8 +945,8 @@ def interactive():
for v in tf.trainable_variables():
shape = v.get_shape().as_list()
total += mul(shape)
-print (v.name, shape, mul(shape))
-print total
+print(v.name, shape, mul(shape))
+print(total)
# Start interactive loop.
sys.stdout.write("Input to Neural GPU Translation Model.\n")
sys.stdout.write("> ")
@@ -960,7 +961,7 @@ def interactive():
normalize_digits=FLAGS.normalize_digits)
else:
token_ids = wmt.sentence_to_token_ids(inpt, en_vocab)
-print [rev_en_vocab[t] for t in token_ids]
+print([rev_en_vocab[t] for t in token_ids])
# Which bucket does it belong to?
buckets = [b for b in xrange(len(data.bins))
if data.bins[b] >= max(len(token_ids), len(cures))]
@@ -986,12 +987,12 @@ def interactive():
loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm)
outputs = [int(np.argmax(logit, axis=1))
for logit in output_logits]
-print [rev_fr_vocab[t] for t in outputs]
-print loss, data.bins[bucket_id]
-print linearize(outputs, rev_fr_vocab)
+print([rev_fr_vocab[t] for t in outputs])
+print(loss, data.bins[bucket_id])
+print(linearize(outputs, rev_fr_vocab))
cures.append(outputs[gen_idx])
-print cures
-print linearize(cures, rev_fr_vocab)
+print(cures)
+print(linearize(cures, rev_fr_vocab))
if FLAGS.simple_tokenizer:
cur_out = outputs
if wmt.EOS_ID in cur_out:
@@ -1002,11 +1003,11 @@ def interactive():
if loss < result_cost:
result = outputs
result_cost = loss
-print ("FINAL", result_cost)
-print [rev_fr_vocab[t] for t in result]
-print linearize(result, rev_fr_vocab)
+print("FINAL", result_cost)
+print([rev_fr_vocab[t] for t in result])
+print(linearize(result, rev_fr_vocab))
else:
-print "TOOO_LONG"
+print("TOOO_LONG")
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline(), ""
@@ -31,7 +31,7 @@ class ListType(object):
if not isinstance(other, ListType):
return False
return self.arg == other.arg
def __hash__(self):
return hash(self.arg)
@@ -68,7 +68,7 @@ class FunctionType(object):
class Function(object):
def __init__(self, name, arg_types, output_type, fn_arg_types = None):
-self.name = name
+self.name = name
self.arg_types = arg_types
self.fn_arg_types = fn_arg_types or []
self.output_type = output_type
@@ -113,14 +113,14 @@ def minus_one(x): return x - 1
def times_two(x): return x * 2
def neg(x): return x * (-1)
def div_two(x): return int(x/2)
-def sq(x): return x**2
+def sq(x): return x**2
def times_three(x): return x * 3
def div_three(x): return int(x/3)
def times_four(x): return x * 4
def div_four(x): return int(x/4)
-# Int -> Bool
-def pos(x): return x > 0
+# Int -> Bool
+def pos(x): return x > 0
def neg(x): return x < 0
def even(x): return x%2 == 0
def odd(x): return x%2 == 1
@@ -131,24 +131,24 @@ def sub(x, y): return x - y
def mul(x, y): return x * y
# HOFs
-f_map = Function("map", [ListType("Int")],
-ListType("Int"),
+f_map = Function("map", [ListType("Int")],
+ListType("Int"),
[FunctionType(["Int", "Int"])])
-f_filter = Function("filter", [ListType("Int")],
-ListType("Int"),
+f_filter = Function("filter", [ListType("Int")],
+ListType("Int"),
[FunctionType(["Int", "Bool"])])
-f_count = Function("c_count", [ListType("Int")],
-"Int",
+f_count = Function("c_count", [ListType("Int")],
+"Int",
[FunctionType(["Int", "Bool"])])
def c_count(f, xs): return len([x for x in xs if f(x)])
-f_zipwith = Function("c_zipwith", [ListType("Int"), ListType("Int")],
-ListType("Int"),
+f_zipwith = Function("c_zipwith", [ListType("Int"), ListType("Int")],
+ListType("Int"),
[FunctionType(["Int", "Int", "Int"])]) #FIX
def c_zipwith(f, xs, ys): return [f(x, y) for (x, y) in zip(xs, ys)]
f_scan = Function("c_scan", [ListType("Int")],
-ListType("Int"),
+ListType("Int"),
[FunctionType(["Int", "Int", "Int"])])
def c_scan(f, xs):
out = xs
@@ -177,7 +177,7 @@ def evaluate(program_str, input_names_to_vals, default="ERROR"):
with stdoutIO() as s:
# pylint: disable=bare-except
try:
-exec exec_str + " print(out)"
+exec(exec_str + " print(out)")
return s.getvalue()[:-1]
except:
return default
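Python 3 makes exec a builtin function, hence the parenthesized call above; conveniently, the one- and two-argument call forms also parse on Python 2, where exec(code) is read as the exec statement applied to a parenthesized expression. A minimal sketch of the capture-stdout trick evaluate uses, written against the standard library rather than this file's stdoutIO helper (Python 3 only, because of contextlib.redirect_stdout):

    import contextlib
    import io

    code = "out = [x * 2 for x in [1, 2, 3]]; print(out)"
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        exec(code)                 # function form, valid on Python 2 and 3
    print(buf.getvalue().strip())  # -> [2, 4, 6]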
@@ -186,7 +186,7 @@ def evaluate(program_str, input_names_to_vals, default="ERROR"):
class Statement(object):
"""Statement class."""
def __init__(self, fn, output_var, arg_vars, fn_args=None):
self.fn = fn
self.output_var = output_var
@@ -290,7 +290,7 @@ class Program(object):
with stdoutIO() as s:
# pylint: disable=exec-used
-exec inp_str + self.body + "; print(out)"
+exec(inp_str + self.body + "; print(out)")
# pylint: enable=exec-used
return s.getvalue()[:-1]
@@ -412,11 +412,11 @@ def gen(max_len, how_many):
else:
outcomes_to_programs[outcome_str] = t.flat_str()
if counter % 5000 == 0:
-print "== proggen: tried: " + str(counter)
-print "== proggen: kept: " + str(len(outcomes_to_programs))
+print("== proggen: tried: " + str(counter))
+print("== proggen: kept: " + str(len(outcomes_to_programs)))
if counter % 250000 == 0 and save_prefix is not None:
-print "saving..."
+print("saving...")
save_counter = 0
progfilename = os.path.join(save_prefix, "prog_" + str(counter) + ".txt")
iofilename = os.path.join(save_prefix, "io_" + str(counter) + ".txt")
@@ -431,7 +431,7 @@ def gen(max_len, how_many):
for (o, p) in outcomes_to_programs.iteritems():
save_counter += 1
if save_counter % 500 == 0:
-print "saving %d of %d" % (save_counter, len(outcomes_to_programs))
+print("saving %d of %d" % (save_counter, len(outcomes_to_programs)))
fp.write(p+"\n")
fi.write(o+"\n")
ftp.write(str(tokenize(p, tokens))+"\n")
@@ -14,6 +14,8 @@
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
+from __future__ import print_function
import gzip
import os
import re
@@ -53,20 +55,20 @@ _WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/dev-v2.tgz"
def maybe_download(directory, filename, url):
"""Download filename from url unless it's already in directory."""
if not tf.gfile.Exists(directory):
-print "Creating directory %s" % directory
+print("Creating directory %s" % directory)
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if not tf.gfile.Exists(filepath):
-print "Downloading %s to %s" % (url, filepath)
+print("Downloading %s to %s" % (url, filepath))
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
-print "Successfully downloaded", filename, statinfo.st_size, "bytes"
+print("Successfully downloaded", filename, statinfo.st_size, "bytes")
return filepath
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print "Unpacking %s to %s" % (gz_path, new_path)
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
@@ -80,7 +82,7 @@ def get_wmt_enfr_train_set(directory):
tf.gfile.Exists(train_path +".en")):
corpus_file = maybe_download(directory, "training-giga-fren.tar",
_WMT_ENFR_TRAIN_URL)
-print "Extracting tar file %s" % corpus_file
+print("Extracting tar file %s" % corpus_file)
with tarfile.open(corpus_file, "r") as corpus_tar:
corpus_tar.extractall(directory)
gunzip_file(train_path + ".fr.gz", train_path + ".fr")
@@ -95,7 +97,7 @@ def get_wmt_enfr_dev_set(directory):
if not (tf.gfile.Exists(dev_path + ".fr") and
tf.gfile.Exists(dev_path + ".en")):
dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL)
-print "Extracting tgz file %s" % dev_file
+print("Extracting tgz file %s" % dev_file)
with tarfile.open(dev_file, "r:gz") as dev_tar:
fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
@@ -206,7 +208,7 @@ def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not tf.gfile.Exists(vocabulary_path):
-print "Creating vocabulary %s from data %s" % (vocabulary_path, data_path)
+print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab, chars = {}, {}
for c in _PUNCTUATION:
chars[c] = 1
@@ -218,7 +220,7 @@ def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
line = " ".join(line_in.split())
counter += 1
if counter % 100000 == 0:
-print " processing fr line %d" % counter
+print(" processing fr line %d" % counter)
for c in line:
if c in chars:
chars[c] += 1
@@ -240,7 +242,7 @@ def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
line = " ".join(line_in.split())
counter += 1
if counter % 100000 == 0:
-print " processing en line %d" % counter
+print(" processing en line %d" % counter)
for c in line:
if c in chars:
chars[c] += 1
@@ -371,7 +373,7 @@ def data_to_token_ids(data_path, target_path, vocabulary_path,
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not tf.gfile.Exists(target_path):
-print "Tokenizing data in %s" % data_path
+print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with tf.gfile.GFile(data_path, mode="rb") as data_file:
with tf.gfile.GFile(target_path, mode="w") as tokens_file:
@@ -379,7 +381,7 @@ def data_to_token_ids(data_path, target_path, vocabulary_path,
for line in data_file:
counter += 1
if counter % 100000 == 0:
-print " tokenizing line %d" % counter
+print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(line, vocab, tokenizer,
normalize_digits)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
@@ -15,6 +15,8 @@
"""Functions for constructing vocabulary, converting the examples to integer format, and building the required masks for batch computation. Author: aneelakantan (Arvind Neelakantan)
"""
+from __future__ import print_function
import copy
import numbers
import numpy as np
@@ -536,15 +538,15 @@ def add_special_words(utility):
utility.reverse_word_ids[utility.word_ids[
utility.entry_match_token]] = utility.entry_match_token
utility.entry_match_token_id = utility.word_ids[utility.entry_match_token]
-print "entry match token: ", utility.word_ids[
-utility.entry_match_token], utility.entry_match_token_id
+print("entry match token: ", utility.word_ids[
+utility.entry_match_token], utility.entry_match_token_id)
utility.words.append(utility.column_match_token)
utility.word_ids[utility.column_match_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.column_match_token]] = utility.column_match_token
utility.column_match_token_id = utility.word_ids[utility.column_match_token]
-print "entry match token: ", utility.word_ids[
-utility.column_match_token], utility.column_match_token_id
+print("entry match token: ", utility.word_ids[
+utility.column_match_token], utility.column_match_token_id)
utility.words.append(utility.dummy_token)
utility.word_ids[utility.dummy_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
@@ -15,6 +15,8 @@
"""Author: aneelakantan (Arvind Neelakantan)
"""
+from __future__ import print_function
import numpy as np
import tensorflow as tf
import nn_utils
@@ -545,7 +547,7 @@ class Graph():
self.batch_log_prob = tf.zeros([self.batch_size], dtype=self.data_type)
#Perform max_passes and at each pass select operation and column
for curr_pass in range(max_passes):
-print "step: ", curr_pass
+print("step: ", curr_pass)
output, select, softmax, soft_softmax, column_softmax, soft_column_softmax = self.one_pass(
select, question_embedding, hidden_vectors, hprev, prev_select_1,
curr_pass)
@@ -633,10 +635,10 @@ class Graph():
self.params = params
batch_size = self.batch_size
learning_rate = tf.cast(self.utility.FLAGS.learning_rate, self.data_type)
-self.total_cost = self.compute_error()
+self.total_cost = self.compute_error()
optimize_params = self.params.values()
optimize_names = self.params.keys()
-print "optimize params ", optimize_names
+print("optimize params ", optimize_names)
if (self.utility.FLAGS.l2_regularizer > 0.0):
reg_cost = 0.0
for ind_param in self.params.keys():
@@ -645,7 +647,7 @@ class Graph():
grads = tf.gradients(self.total_cost, optimize_params, name="gradients")
grad_norm = 0.0
for p, name in zip(grads, optimize_names):
-print "grads: ", p, name
+print("grads: ", p, name)
if isinstance(p, tf.IndexedSlices):
grad_norm += tf.reduce_sum(p.values * p.values)
elif not (p == None):
@@ -672,7 +674,6 @@ class Graph():
learning_rate,
epsilon=tf.cast(self.utility.FLAGS.eps, self.data_type),
use_locking=True)
-self.step = adam.apply_gradients(zip(grads, optimize_params),
+self.step = adam.apply_gradients(zip(grads, optimize_params),
global_step=self.global_step)
self.init_op = tf.global_variables_initializer()
@@ -113,9 +113,9 @@ def evaluate(sess, data, batch_size, graph, i):
graph))
gc += ct * batch_size
num_examples += batch_size
-print "dev set accuracy after ", i, " : ", gc / num_examples
-print num_examples, len(data)
-print "--------"
+print("dev set accuracy after ", i, " : ", gc / num_examples)
+print(num_examples, len(data))
+print("--------")
def Train(graph, utility, batch_size, train_data, sess, model_dir,
@@ -142,15 +142,15 @@ def Train(graph, utility, batch_size, train_data, sess, model_dir,
if (i > 0 and i % FLAGS.eval_cycle == 0):
end = time.time()
time_taken = end - start
-print "step ", i, " ", time_taken, " seconds "
+print("step ", i, " ", time_taken, " seconds ")
start = end
-print " printing train set loss: ", train_set_loss / utility.FLAGS.eval_cycle
+print(" printing train set loss: ", train_set_loss / utility.FLAGS.eval_cycle)
train_set_loss = 0.0
def master(train_data, dev_data, utility):
#creates TF graph and calls trainer or evaluator
-batch_size = utility.FLAGS.batch_size
+batch_size = utility.FLAGS.batch_size
model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
#create all paramters of the model
param_class = parameters.Parameters(utility)
@@ -183,23 +183,23 @@ def master(train_data, dev_data, utility):
file_list = sorted(selected_models.items(), key=lambda x: x[0])
if (len(file_list) > 0):
file_list = file_list[0:len(file_list) - 1]
-print "list of models: ", file_list
+print("list of models: ", file_list)
for model_file in file_list:
model_file = model_file[1]
-print "restoring: ", model_file
+print("restoring: ", model_file)
saver.restore(sess, model_dir + "/" + model_file)
model_step = int(
model_file.split("_")[len(model_file.split("_")) - 1])
-print "evaluating on dev ", model_file, model_step
+print("evaluating on dev ", model_file, model_step)
evaluate(sess, dev_data, batch_size, graph, model_step)
else:
ckpt = tf.train.get_checkpoint_state(model_dir)
-print "model dir: ", model_dir
+print("model dir: ", model_dir)
if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
-print "create dir: ", utility.FLAGS.output_dir
+print("create dir: ", utility.FLAGS.output_dir)
tf.gfile.MkDir(utility.FLAGS.output_dir)
if (not (tf.gfile.IsDirectory(model_dir))):
-print "create dir: ", model_dir
+print("create dir: ", model_dir)
tf.gfile.MkDir(model_dir)
Train(graph, utility, batch_size, train_data, sess, model_dir,
saver)
@@ -225,10 +225,10 @@ def main(args):
train_data = data_utils.complete_wiki_processing(train_data, utility, True)
dev_data = data_utils.complete_wiki_processing(dev_data, utility, False)
test_data = data_utils.complete_wiki_processing(test_data, utility, False)
-print "# train examples ", len(train_data)
-print "# dev examples ", len(dev_data)
-print "# test examples ", len(test_data)
-print "running open source"
+print("# train examples ", len(train_data))
+print("# dev examples ", len(dev_data))
+print("# test examples ", len(test_data))
+print("running open source")
#construct TF graph and train or evaluate
master(train_data, dev_data, utility)
@@ -59,7 +59,7 @@ class Parameters:
#Biases for the gates and cell
for bias in ["i", "f", "c", "o"]:
if (bias == "f"):
-print "forget gate bias"
+print("forget gate bias")
params[key + "_" + bias] = tf.Variable(
tf.random_uniform([embedding_dims], 1.0, 1.1, self.utility.
tf_data_type[self.utility.FLAGS.data_type]))
@@ -22,6 +22,8 @@ columns.
lookup answer (or matrix) is also split into number and word lookup matrix
Author: aneelakantan (Arvind Neelakantan)
"""
+from __future__ import print_function
import math
import os
import re
@@ -56,7 +58,7 @@ def correct_unicode(string):
#string = re.sub("[“”«»]", "\"", string)
#string = re.sub("[•†‡]", "", string)
#string = re.sub("[‐‑–—]", "-", string)
-string = re.sub(ur'[\u2E00-\uFFFF]', "", string)
+string = re.sub(r'[\u2E00-\uFFFF]', "", string)
string = re.sub("\\s+", " ", string).strip()
return string
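The ur'...' literals removed in this hunk are a SyntaxError on Python 3, which dropped raw-unicode string literals; a plain r'...' literal is already unicode there. The replacement keeps matching the intended characters because, since Python 3.3, the re module itself interprets \uXXXX escapes inside patterns, so the string literal does not have to. A small demonstration (Python 3; the sample string is arbitrary):

    import re

    s = "caf\u00e9 \u2e85 sample"  # one char below U+2E00, one inside the range
    print(re.sub(r'[\u2E00-\uFFFF]', "", s))  # the CJK radical is stripped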
@@ -78,7 +80,7 @@ def full_normalize(string):
# Remove trailing info in brackets
string = re.sub("\[[^\]]*\]", "", string)
# Remove most unicode characters in other languages
-string = re.sub(ur'[\u007F-\uFFFF]', "", string.strip())
+string = re.sub(r'[\u007F-\uFFFF]', "", string.strip())
# Remove trailing info in parenthesis
string = re.sub("\([^)]*\)$", "", string.strip())
string = final_normalize(string)
@@ -207,7 +209,7 @@ class WikiQuestionGenerator(object):
self.dev_loader = WikiQuestionLoader(dev_name, root_folder)
self.test_loader = WikiQuestionLoader(test_name, root_folder)
self.bad_examples = 0
-self.root_folder = root_folder
+self.root_folder = root_folder
self.data_folder = os.path.join(self.root_folder, "annotated/data")
self.annotated_examples = {}
self.annotated_tables = {}
@@ -298,7 +300,7 @@ class WikiQuestionGenerator(object):
question_id, question, target_canon, context)
self.annotated_tables[context] = []
counter += 1
-print "Annotated examples loaded ", len(self.annotated_examples)
+print("Annotated examples loaded ", len(self.annotated_examples))
f.close()
def is_number_column(self, a):
@@ -145,7 +145,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
-print 'Writing output image %d to %s' % (i, output_file)
+print('Writing output image %d to %s' % (i, output_file))
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
@@ -30,6 +30,8 @@ python celeba_formatting.py \
"""
+from __future__ import print_function
import os
import os.path
@@ -70,7 +72,7 @@ def main():
writer = tf.python_io.TFRecordWriter(file_out)
for example_idx, img_fn in enumerate(img_fn_list):
if example_idx % 1000 == 0:
-print example_idx, "/", num_examples
+print(example_idx, "/", num_examples)
image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
rows = image_raw.shape[0]
cols = image_raw.shape[1]
@@ -34,6 +34,8 @@ done
"""
+from __future__ import print_function
import os
import os.path
@@ -73,10 +75,10 @@ def main():
file_out = "%s_%05d.tfrecords"
file_out = file_out % (FLAGS.file_out,
example_idx // n_examples_per_file)
-print "Writing on:", file_out
+print("Writing on:", file_out)
writer = tf.python_io.TFRecordWriter(file_out)
if example_idx % 1000 == 0:
-print example_idx, "/", num_examples
+print(example_idx, "/", num_examples)
image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
rows = image_raw.shape[0]
cols = image_raw.shape[1]
@@ -29,6 +29,7 @@ python lsun_formatting.py \
--fn_root [LSUN_FOLDER]
"""
+from __future__ import print_function
import os
import os.path
@@ -68,10 +69,10 @@ def main():
file_out = "%s_%05d.tfrecords"
file_out = file_out % (FLAGS.file_out,
example_idx // n_examples_per_file)
-print "Writing on:", file_out
+print("Writing on:", file_out)
writer = tf.python_io.TFRecordWriter(file_out)
if example_idx % 1000 == 0:
-print example_idx, "/", num_examples
+print(example_idx, "/", num_examples)
image_raw = numpy.array(Image.open(os.path.join(fn_root, img_fn)))
rows = image_raw.shape[0]
cols = image_raw.shape[1]