Commit 3f8ea5cb authored by Neal Wu

Fixes for differential_privacy

parent 0b1e767f
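For context: the one substantive API change in this diff replaces direct calls to the gfile module with the public tf.gfile namespace, so file I/O goes through TensorFlow's filesystem abstraction. A minimal sketch of the resulting pattern (maybe_fetch is a hypothetical helper, assuming TensorFlow 1.x where tf.gfile is available):

import tensorflow as tf

def maybe_fetch(filepath):
  # tf.gfile mirrors the Python file API but also works on non-local
  # filesystems (e.g. GCS paths), unlike plain open()/os.path checks.
  if not tf.gfile.Exists(filepath):
    print('%s is missing and would be downloaded here.' % filepath)
  return filepath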
@@ -216,10 +216,10 @@ def main(unused_argv):
   # If we are reproducing results from paper https://arxiv.org/abs/1610.05755,
   # download the required binaries with label information.
   ##################################################################
   # Binaries for MNIST results
   paper_binaries_mnist = \
     ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_labels.npy?raw=true",
      "https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_100_indices_used_by_student.npy?raw=true"]
   if FLAGS.counts_file == "mnist_250_teachers_labels.npy" \
       or FLAGS.indices_file == "mnist_250_teachers_100_indices_used_by_student.npy":
@@ -254,7 +254,7 @@ def main(unused_argv):
   total_log_mgf_nm = np.array([0.0 for _ in l_list])
   total_ss_nm = np.array([0.0 for _ in l_list])
   noise_eps = FLAGS.noise_eps
   for i in indices:
     total_log_mgf_nm += np.array(
         [logmgf_from_counts(counts_mat[i], noise_eps, l)
...
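The accumulation in the second hunk above is the composition step of the privacy analysis: log moment-generating functions of the privacy loss add up across the noisy-max queries answered by the teacher ensemble (the moments-accountant argument behind https://arxiv.org/abs/1610.05755), so each answered index i simply contributes its per-query term to total_log_mgf_nm.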
@@ -95,9 +95,9 @@ def inference(images, dropout=False):
   # conv1
   with tf.variable_scope('conv1') as scope:
     kernel = _variable_with_weight_decay('weights',
                                          shape=first_conv_shape,
                                          stddev=1e-4,
                                          wd=0.0)
     conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
     biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
@@ -108,25 +108,25 @@ def inference(images, dropout=False):
   # pool1
   pool1 = tf.nn.max_pool(conv1,
                          ksize=[1, 3, 3, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='pool1')
   # norm1
   norm1 = tf.nn.lrn(pool1,
                     4,
                     bias=1.0,
                     alpha=0.001 / 9.0,
                     beta=0.75,
                     name='norm1')
   # conv2
   with tf.variable_scope('conv2') as scope:
     kernel = _variable_with_weight_decay('weights',
                                          shape=[5, 5, 64, 128],
                                          stddev=1e-4,
                                          wd=0.0)
     conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
     biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
@@ -137,18 +137,18 @@ def inference(images, dropout=False):
   # norm2
   norm2 = tf.nn.lrn(conv2,
                     4,
                     bias=1.0,
                     alpha=0.001 / 9.0,
                     beta=0.75,
                     name='norm2')
   # pool2
   pool2 = tf.nn.max_pool(norm2,
                          ksize=[1, 3, 3, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='pool2')
   # local3
@@ -156,9 +156,9 @@ def inference(images, dropout=False):
     # Move everything into depth so we can perform a single matrix multiply.
     reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
     dim = reshape.get_shape()[1].value
     weights = _variable_with_weight_decay('weights',
                                           shape=[dim, 384],
                                           stddev=0.04,
                                           wd=0.004)
     biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
     local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
@@ -167,9 +167,9 @@ def inference(images, dropout=False):
   # local4
   with tf.variable_scope('local4') as scope:
     weights = _variable_with_weight_decay('weights',
                                           shape=[384, 192],
                                           stddev=0.04,
                                           wd=0.004)
     biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
     local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
@@ -178,11 +178,11 @@ def inference(images, dropout=False):
   # compute logits
   with tf.variable_scope('softmax_linear') as scope:
     weights = _variable_with_weight_decay('weights',
                                           [192, FLAGS.nb_labels],
                                           stddev=1/192.0,
                                           wd=0.0)
     biases = _variable_on_cpu('biases',
                               [FLAGS.nb_labels],
                               tf.constant_initializer(0.0))
     logits = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
@@ -386,7 +386,7 @@ def train_op_fun(total_loss, global_step):
   """
   # Variables that affect learning rate.
   nb_ex_per_train_epoch = int(60000 / FLAGS.nb_teachers)
   num_batches_per_epoch = nb_ex_per_train_epoch / FLAGS.batch_size
   decay_steps = int(num_batches_per_epoch * FLAGS.epochs_per_decay)
...
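As a worked example of the learning-rate decay arithmetic in the last hunk above (the flag values are illustrative, not the repository's defaults): with 250 teachers, each MNIST teacher trains on int(60000 / 250) = 240 examples; with a batch size of 120 that is 240 / 120 = 2 batches per epoch, so epochs_per_decay = 350 yields decay_steps = int(2 * 350) = 700 global steps between decays.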
@@ -47,7 +47,7 @@ def create_dir_if_needed(dest_directory):
 def maybe_download(file_urls, directory):
   """
   Download a set of files in temporary local folder
   :param directory: the directory where to download
   :return: a tuple of filepaths corresponding to the files given as input
   """
   # Create directory if doesn't exist
@@ -73,7 +73,7 @@ def maybe_download(file_urls, directory):
     result.append(filepath)
     # Test if file already exists
-    if not gfile.Exists(filepath):
+    if not tf.gfile.Exists(filepath):
       def _progress(count, block_size, total_size):
         sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
             float(count * block_size) / float(total_size) * 100.0))
@@ -124,7 +124,7 @@ def extract_svhn(local_url):
   :return:
   """
-  with gfile.Open(local_url, mode='r') as file_obj:
+  with tf.gfile.Open(local_url, mode='r') as file_obj:
     # Load MATLAB matrix using scipy IO
     dict = loadmat(file_obj)
...
@@ -64,11 +64,11 @@ def train_teacher(dataset, nb_teachers, teacher_id):
   else:
     print("Check value of dataset flag")
     return False
   # Retrieve subset of data for this teacher
   data, labels = input.partition_dataset(train_data,
                                          train_labels,
                                          nb_teachers,
                                          teacher_id)
   print("Length of training data: " + str(len(labels)))
...
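The input.partition_dataset call in the last hunk is what hands each teacher a disjoint shard of the training set, which is the premise of the multiple-teachers setup: no two teachers should train on the same example. A minimal sketch of such a partition helper (an equal-split illustration, not necessarily the repository's exact implementation):

def partition_dataset(data, labels, nb_teachers, teacher_id):
  # Return the teacher_id-th of nb_teachers equal, disjoint shards.
  assert 0 <= teacher_id < nb_teachers
  batch_len = len(data) // nb_teachers
  start = teacher_id * batch_len
  end = start + batch_len
  return data[start:end], labels[start:end]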