Unverified Commit 6b9d5fba authored by Toby Boyd's avatar Toby Boyd Committed by GitHub
Browse files

Merge branch 'master' into patch-1

parents 5fd687c5 5fa2a4e6
......@@ -17,6 +17,7 @@ r"""Generaly Utilities.
"""
import numpy as np, cPickle, os, time
from six.moves import xrange
import src.file_utils as fu
import logging
......@@ -93,12 +94,12 @@ def tic_toc_print(interval, string):
global tic_toc_print_time_old
if 'tic_toc_print_time_old' not in globals():
tic_toc_print_time_old = time.time()
print string
print(string)
else:
new_time = time.time()
if new_time - tic_toc_print_time_old > interval:
tic_toc_print_time_old = new_time;
print string
print(string)
def mkdir_if_missing(output_dir):
if not fu.exists(output_dir):
......@@ -126,7 +127,7 @@ def load_variables(pickle_file_name):
def voc_ap(rec, prec):
rec = rec.reshape((-1,1))
prec = prec.reshape((-1,1))
z = np.zeros((1,1))
z = np.zeros((1,1))
o = np.ones((1,1))
mrec = np.vstack((z, rec, o))
mpre = np.vstack((z, prec, z))
......@@ -165,4 +166,3 @@ def calc_pr(gt, out, wt=None):
ap = voc_ap(rec, prec)
return ap, rec, prec
......@@ -58,7 +58,7 @@ def main(_):
#iteration = FLAGS.iteration
if not tf.gfile.Exists(FLAGS.input_codes):
print '\nInput codes not found.\n'
print('\nInput codes not found.\n')
return
with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
......
......@@ -171,7 +171,7 @@ def train():
'code_length': model.average_code_length
}
np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)
print np_tensors['code_length']
print(np_tensors['code_length'])
sv.Stop()
......
......@@ -18,6 +18,7 @@
import os
import numpy as np
from six.moves import xrange
import tensorflow as tf
import synthetic_model
......
......@@ -16,6 +16,7 @@
"""Binary code sample generator."""
import numpy as np
from six.moves import xrange
_CRC_LINE = [
......
......@@ -21,6 +21,7 @@ from __future__ import unicode_literals
import math
import numpy as np
import six
import tensorflow as tf
......@@ -39,7 +40,7 @@ class RsqrtInitializer(object):
1.0 / sqrt(product(shape[dims]))
**kwargs: Extra keyword arguments to pass to tf.truncated_normal.
"""
if isinstance(dims, (int, long)):
if isinstance(dims, six.integer_types):
self._dims = [dims]
else:
self._dims = dims
......@@ -73,7 +74,7 @@ class RectifierInitializer(object):
sqrt(scale / product(shape[dims])).
**kwargs: Extra keyword arguments to pass to tf.truncated_normal.
"""
if isinstance(dims, (int, long)):
if isinstance(dims, six.integer_types):
self._dims = [dims]
else:
self._dims = dims
......
......@@ -16,6 +16,7 @@
"""Define some typical masked 2D convolutions."""
import numpy as np
from six.moves import xrange
import tensorflow as tf
import block_util
......
......@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
import blocks_masked_conv2d
......
......@@ -22,6 +22,7 @@ import math
import os
import numpy as np
from six.moves import xrange
import tensorflow as tf
import blocks_std
......
......@@ -25,6 +25,7 @@ from __future__ import print_function
from delf import feature_pb2
from delf import datum_io
import numpy as np
from six.moves import xrange
import tensorflow as tf
......
......@@ -22,6 +22,7 @@ import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
......
......@@ -17,6 +17,7 @@
import collections
from six.moves import xrange
import tensorflow as tf
OrderedDict = collections.OrderedDict
......
......@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def labels_from_probs(probs):
......@@ -127,5 +128,3 @@ def aggregation_most_frequent(logits):
result[i] = np.argmax(label_counts)
return np.asarray(result, dtype=np.int32)
......@@ -41,6 +41,7 @@ python analysis.py
import os
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.multiple_teachers.input import maybe_download
......@@ -139,7 +140,7 @@ def logmgf_exact(q, priv_eps, l):
try:
log_t = math.log(t)
except ValueError:
print "Got ValueError in math.log for values :" + str((q, priv_eps, l, t))
print("Got ValueError in math.log for values :" + str((q, priv_eps, l, t)))
log_t = priv_eps * l
else:
log_t = priv_eps * l
......@@ -171,7 +172,7 @@ def sens_at_k(counts, noise_eps, l, k):
"""
counts_sorted = sorted(counts, reverse=True)
if 0.5 * noise_eps * l > 1:
print "l too large to compute sensitivity"
print("l too large to compute sensitivity")
return 0
# Now we can assume that at k, gap remains positive
# or we have reached the point where logmgf_exact is
......@@ -268,8 +269,8 @@ def main(unused_argv):
# Solving gives eps = (alpha - ln (delta))/l
eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list
print "Epsilons (Noisy Max): " + str(eps_list_nm)
print "Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list)
print("Epsilons (Noisy Max): " + str(eps_list_nm))
print("Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list))
# If beta < eps / 2 ln (1/delta), then adding noise Lap(1) * 2 SS/eps
# is eps,delta DP
......@@ -280,12 +281,12 @@ def main(unused_argv):
# Print the first one's scale
ss_eps = 2.0 * beta * math.log(1/delta)
ss_scale = 2.0 / ss_eps
print "To get an " + str(ss_eps) + "-DP estimate of epsilon, "
print "..add noise ~ " + str(ss_scale)
print "... times " + str(total_ss_nm / l_list)
print "Epsilon = " + str(min(eps_list_nm)) + "."
print("To get an " + str(ss_eps) + "-DP estimate of epsilon, ")
print("..add noise ~ " + str(ss_scale))
print("... times " + str(total_ss_nm / l_list))
print("Epsilon = " + str(min(eps_list_nm)) + ".")
if min(eps_list_nm) == eps_list_nm[-1]:
print "Warning: May not have used enough values of l"
print("Warning: May not have used enough values of l")
# Data independent bound, as mechanism is
# 2*noise_eps DP.
......@@ -294,7 +295,7 @@ def main(unused_argv):
[logmgf_exact(1.0, 2.0 * noise_eps, l) for l in l_list])
data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list
print "Data independent bound = " + str(min(data_ind_eps_list)) + "."
print("Data independent bound = " + str(min(data_ind_eps_list)) + ".")
return
......
......@@ -20,6 +20,7 @@ from __future__ import print_function
from datetime import datetime
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
import time
......@@ -600,5 +601,3 @@ def softmax_preds(images, ckpt_path, return_logits=False):
tf.reset_default_graph()
return preds
......@@ -24,6 +24,7 @@ import numpy as np
import os
from scipy.io import loadmat as loadmat
from six.moves import urllib
from six.moves import xrange
import sys
import tarfile
......
......@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.multiple_teachers import aggregation
......
......@@ -40,12 +40,15 @@ To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
from __future__ import print_function
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.stats
from six.moves import xrange
from sympy.mpmath import mp
......@@ -108,10 +111,10 @@ def compute_a(sigma, q, lmbd, verbose=False):
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print "A: by binomial expansion {} = {} + {}".format(
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact)
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
......@@ -125,8 +128,8 @@ def compute_b(sigma, q, lmbd, verbose=False):
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print "M =", m
print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
......@@ -140,9 +143,9 @@ def compute_b(sigma, q, lmbd, verbose=False):
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print "B: by numerical integration", b_lambda
print "B must be no more than ", b_bound
print b_lambda, b_bound
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
......@@ -188,10 +191,10 @@ def compute_a_mp(sigma, q, lmbd, verbose=False):
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print "A: by numerical integration {} = {} + {}".format(
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term)
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
......@@ -210,8 +213,8 @@ def compute_b_mp(sigma, q, lmbd, verbose=False):
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print "M =", m
print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
......@@ -223,8 +226,8 @@ def compute_b_mp(sigma, q, lmbd, verbose=False):
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print "B by numerical integration", b_lambda
print "B must be no more than ", b_bound
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
......
......@@ -19,6 +19,7 @@
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from domain_adaptation.datasets import dataset_factory
......
......@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
tfgan = tf.contrib.gan
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment