Commit 996909d1 authored by Neal Wu, committed by GitHub

Merge pull request #1042 from jo8hua/initializer_update

Updated deprecated tf.initialize_all_variables() with tf.global_variables_initializer()
parents afdcf7d4 31f1af58
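
For context, the change this commit applies is mechanical: tf.initialize_all_variables() is deprecated in favor of tf.global_variables_initializer(), which builds the same op that initializes every global variable in the graph. A minimal sketch of the before/after pattern in TF 1.x graph mode (the variable below is illustrative, not taken from this commit):

import tensorflow as tf

# Illustrative variable; any tf.Variable in the default graph is covered.
w = tf.Variable(tf.zeros([2, 2]), name="w")

# Deprecated spelling:   init = tf.initialize_all_variables()
# Replacement used throughout this commit:
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)        # runs the initializer for every global variable
    print(sess.run(w))    # now safe to read
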
@@ -21,7 +21,7 @@ class Autoencoder(object):
 self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
 self.optimizer = optimizer.minimize(self.cost)
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 self.sess = tf.Session()
 self.sess.run(init)
...
@@ -25,7 +25,7 @@ class AdditiveGaussianNoiseAutoencoder(object):
 self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
 self.optimizer = optimizer.minimize(self.cost)
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 self.sess = tf.Session()
 self.sess.run(init)
@@ -92,7 +92,7 @@ class MaskingNoiseAutoencoder(object):
 self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
 self.optimizer = optimizer.minimize(self.cost)
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 self.sess = tf.Session()
 self.sess.run(init)
...
@@ -30,7 +30,7 @@ class VariationalAutoencoder(object):
 self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
 self.optimizer = optimizer.minimize(self.cost)
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 self.sess = tf.Session()
 self.sess.run(init)
...
@@ -344,7 +344,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
 # We need to maintain the intialization sequence.
 for v in tf.trainable_variables():
   sess.run(tf.initialize_variables([v]))
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 sess.run(init_ops)
 results = []
...
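
The hunk above first initializes each trainable variable individually, to keep a deterministic initialization order, and only then runs the blanket initializer. As an aside, the per-variable call has its own non-deprecated spelling, tf.variables_initializer; this commit only touches the blanket call, so the sketch below is commentary rather than part of the diff (the variables are illustrative):

import tensorflow as tf

a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, name="b")

with tf.Session() as sess:
    # Initialize trainable variables one at a time, in a fixed order,
    # mirroring the loop in the hunk above.
    for v in tf.trainable_variables():
        sess.run(tf.variables_initializer([v]))
    # Then initialize anything the loop did not cover.
    sess.run(tf.global_variables_initializer())
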
@@ -490,7 +490,7 @@ def train(images, labels, ckpt_path, dropout=False):
 print("Graph constructed and saver created")
 # Build an initialization operation to run below.
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 # Create and init sessions
 sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)) #NOLINT(long-line)
...
@@ -124,7 +124,7 @@ class MomentsAccountant(object):
 """Privacy accountant which keeps track of moments of privacy loss.
 Note: The constructor of this class creates tf.Variables that must
-be initialized with tf.initialize_all_variables() or similar calls.
+be initialized with tf.global_variables_initializer() or similar calls.
 MomentsAccountant accumulates the high moments of the privacy loss. It
 requires a method for computing differenital moments of the noise (See
...
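
The docstring note in the hunk above is the general pattern behind this whole commit: an object that creates tf.Variables in its constructor leaves them uninitialized until the caller runs an initializer op. A minimal sketch of that pattern (the Accountant class below is an illustrative stand-in, not the real MomentsAccountant API):

import tensorflow as tf

class Accountant(object):
    """Stand-in for a class that creates tf.Variables in __init__."""
    def __init__(self, num_moments=8):
        # Exists in the graph, but holds no value until initialized.
        self.log_moments = tf.Variable(tf.zeros([num_moments]), name="log_moments")

accountant = Accountant()
with tf.Session() as sess:
    # Without this call, reading accountant.log_moments would raise
    # FailedPreconditionError (attempting to use an uninitialized value).
    sess.run(tf.global_variables_initializer())
    print(sess.run(accountant.log_moments))
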
@@ -236,7 +236,7 @@ def train(target, dataset, cluster_spec):
 summary_op = tf.merge_all_summaries()
 # Build an initialization operation to run below.
-init_op = tf.initialize_all_variables()
+init_op = tf.global_variables_initializer()
 # We run the summaries in the same thread as the training operations by
 # passing in None for summary_op to avoid a summary_thread being started.
...
@@ -307,7 +307,7 @@ def train(dataset):
 summary_op = tf.merge_summary(summaries)
 # Build an initialization operation to run below.
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 # Start running operations on the Graph. allow_soft_placement must be set to
 # True to build towers on GPU, as some of the ops do not have GPU
...
@@ -95,7 +95,7 @@ class InceptionTest(tf.test.TestCase):
 self.assertListEqual(logits.get_shape().as_list(),
 [None, num_classes])
 images = tf.random_uniform((batch_size, height, width, 3))
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(logits, {inputs: images.eval()})
 self.assertEquals(output.shape, (batch_size, num_classes))
@@ -108,7 +108,7 @@ class InceptionTest(tf.test.TestCase):
 logits, _ = inception.inception_v3(eval_inputs, num_classes,
 is_training=False)
 predictions = tf.argmax(logits, 1)
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(predictions)
 self.assertEquals(output.shape, (batch_size,))
@@ -125,7 +125,7 @@ class InceptionTest(tf.test.TestCase):
 logits, _ = inception.inception_v3(eval_inputs, num_classes,
 is_training=False)
 predictions = tf.argmax(logits, 1)
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(predictions)
 self.assertEquals(output.shape, (eval_batch_size,))
...
@@ -128,7 +128,7 @@ class ConvTest(tf.test.TestCase):
 wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
 self.assertEquals(wd.op.name,
 'Conv/weights/Regularizer/L2Regularizer/value')
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 self.assertTrue(sess.run(wd) <= 0.01)
 def testCreateConvWithoutWD(self):
@@ -254,7 +254,7 @@ class FCTest(tf.test.TestCase):
 wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
 self.assertEquals(wd.op.name,
 'FC/weights/Regularizer/L2Regularizer/value')
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 self.assertTrue(sess.run(wd) <= 0.01)
 def testCreateFCWithoutWD(self):
@@ -604,7 +604,7 @@ class BatchNormTest(tf.test.TestCase):
 barrier = tf.no_op(name='gradient_barrier')
 output = control_flow_ops.with_dependencies([barrier], output)
 # Initialize all variables
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
 moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
 mean, variance = sess.run([moving_mean, moving_variance])
@@ -634,7 +634,7 @@ class BatchNormTest(tf.test.TestCase):
 barrier = tf.no_op(name='gradient_barrier')
 output = control_flow_ops.with_dependencies([barrier], output)
 # Initialize all variables
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
 moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
 mean, variance = sess.run([moving_mean, moving_variance])
@@ -668,7 +668,7 @@ class BatchNormTest(tf.test.TestCase):
 barrier = tf.no_op(name='gradient_barrier')
 output = control_flow_ops.with_dependencies([barrier], output)
 # Initialize all variables
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
 moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
 mean, variance = sess.run([moving_mean, moving_variance])
...
@@ -160,7 +160,7 @@ def train(data_dir, checkpoint_path, config):
 with tf.variable_scope("model", reuse=None, initializer=initializer):
   m = NamignizerModel(is_training=True, config=config)
-tf.initialize_all_variables().run()
+tf.global_variables_initializer().run()
 for i in range(config.max_max_epoch):
   lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
...
@@ -377,7 +377,7 @@ def initialize(sess=None):
 % ckpt.model_checkpoint_path)
 model.saver.restore(sess, ckpt.model_checkpoint_path)
 elif sv is None:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   data.print_out("Initialized variables (no supervisor mode).")
 elif FLAGS.task < 1 and FLAGS.mem_size > 0:
   # sess.run(model.mem_norm_op)
...
@@ -674,5 +674,5 @@ class Graph():
 use_locking=True)
 self.step = adam.apply_gradients(zip(grads, optimize_params),
 global_step=self.global_step)
-self.init_op = tf.initialize_all_variables()
+self.init_op = tf.global_variables_initializer()
@@ -502,7 +502,7 @@ class DeployTest(tf.test.TestCase):
 self.assertEqual(model.train_op.op.name, 'train_op')
 with tf.Session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   moving_mean = tf.contrib.framework.get_variables_by_name(
   'moving_mean')[0]
   moving_variance = tf.contrib.framework.get_variables_by_name(
...
@@ -137,7 +137,7 @@ class AlexnetV2Test(tf.test.TestCase):
 with self.test_session() as sess:
   inputs = tf.random_uniform((batch_size, height, width, 3))
   logits, _ = alexnet.alexnet_v2(inputs)
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(logits)
   self.assertTrue(output.any())
...
@@ -95,7 +95,7 @@ class InceptionTest(tf.test.TestCase):
 self.assertListEqual(logits.get_shape().as_list(),
 [None, num_classes])
 images = tf.random_uniform((batch_size, height, width, 3))
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(logits, {inputs: images.eval()})
 self.assertEquals(output.shape, (batch_size, num_classes))
@@ -109,7 +109,7 @@ class InceptionTest(tf.test.TestCase):
 num_classes,
 is_training=False)
 predictions = tf.argmax(logits, 1)
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(predictions)
 self.assertEquals(output.shape, (batch_size,))
@@ -127,7 +127,7 @@ class InceptionTest(tf.test.TestCase):
 is_training=False,
 reuse=True)
 predictions = tf.argmax(logits, 1)
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(predictions)
 self.assertEquals(output.shape, (eval_batch_size,))
...
@@ -140,7 +140,7 @@ class InceptionV1Test(tf.test.TestCase):
 [batch_size, num_classes])
 pre_pool = end_points['Mixed_5c']
 feed_dict = {inputs: input_np}
-tf.initialize_all_variables().run()
+tf.global_variables_initializer().run()
 pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
 self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
@@ -157,7 +157,7 @@ class InceptionV1Test(tf.test.TestCase):
 images = tf.random_uniform((batch_size, height, width, 3))
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(logits, {inputs: images.eval()})
   self.assertEquals(output.shape, (batch_size, num_classes))
@@ -172,7 +172,7 @@ class InceptionV1Test(tf.test.TestCase):
 predictions = tf.argmax(logits, 1)
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(predictions)
   self.assertEquals(output.shape, (batch_size,))
@@ -189,7 +189,7 @@ class InceptionV1Test(tf.test.TestCase):
 predictions = tf.argmax(logits, 1)
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(predictions)
   self.assertEquals(output.shape, (eval_batch_size,))
@@ -201,7 +201,7 @@ class InceptionV1Test(tf.test.TestCase):
 spatial_squeeze=False)
 with self.test_session() as sess:
-  tf.initialize_all_variables().run()
+  tf.global_variables_initializer().run()
   logits_out = sess.run(logits)
   self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
...
@@ -192,7 +192,7 @@ class InceptionV2Test(tf.test.TestCase):
 [batch_size, num_classes])
 pre_pool = end_points['Mixed_5c']
 feed_dict = {inputs: input_np}
-tf.initialize_all_variables().run()
+tf.global_variables_initializer().run()
 pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
 self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
@@ -209,7 +209,7 @@ class InceptionV2Test(tf.test.TestCase):
 images = tf.random_uniform((batch_size, height, width, 3))
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(logits, {inputs: images.eval()})
   self.assertEquals(output.shape, (batch_size, num_classes))
@@ -224,7 +224,7 @@ class InceptionV2Test(tf.test.TestCase):
 predictions = tf.argmax(logits, 1)
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(predictions)
   self.assertEquals(output.shape, (batch_size,))
@@ -241,7 +241,7 @@ class InceptionV2Test(tf.test.TestCase):
 predictions = tf.argmax(logits, 1)
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(predictions)
   self.assertEquals(output.shape, (eval_batch_size,))
@@ -253,7 +253,7 @@ class InceptionV2Test(tf.test.TestCase):
 spatial_squeeze=False)
 with self.test_session() as sess:
-  tf.initialize_all_variables().run()
+  tf.global_variables_initializer().run()
   logits_out = sess.run(logits)
   self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
...
@@ -221,7 +221,7 @@ class InceptionV3Test(tf.test.TestCase):
 [batch_size, num_classes])
 pre_pool = end_points['Mixed_7c']
 feed_dict = {inputs: input_np}
-tf.initialize_all_variables().run()
+tf.global_variables_initializer().run()
 pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
 self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
@@ -238,7 +238,7 @@ class InceptionV3Test(tf.test.TestCase):
 images = tf.random_uniform((batch_size, height, width, 3))
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(logits, {inputs: images.eval()})
   self.assertEquals(output.shape, (batch_size, num_classes))
@@ -253,7 +253,7 @@ class InceptionV3Test(tf.test.TestCase):
 predictions = tf.argmax(logits, 1)
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(predictions)
   self.assertEquals(output.shape, (batch_size,))
@@ -271,7 +271,7 @@ class InceptionV3Test(tf.test.TestCase):
 predictions = tf.argmax(logits, 1)
 with self.test_session() as sess:
-  sess.run(tf.initialize_all_variables())
+  sess.run(tf.global_variables_initializer())
   output = sess.run(predictions)
   self.assertEquals(output.shape, (eval_batch_size,))
@@ -283,7 +283,7 @@ class InceptionV3Test(tf.test.TestCase):
 spatial_squeeze=False)
 with self.test_session() as sess:
-  tf.initialize_all_variables().run()
+  tf.global_variables_initializer().run()
   logits_out = sess.run(logits)
   self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
...
@@ -175,7 +175,7 @@ class InceptionTest(tf.test.TestCase):
 self.assertListEqual(logits.get_shape().as_list(),
 [None, num_classes])
 images = tf.random_uniform((batch_size, height, width, 3))
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(logits, {inputs: images.eval()})
 self.assertEquals(output.shape, (batch_size, num_classes))
@@ -189,7 +189,7 @@ class InceptionTest(tf.test.TestCase):
 num_classes,
 is_training=False)
 predictions = tf.argmax(logits, 1)
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(predictions)
 self.assertEquals(output.shape, (batch_size,))
@@ -207,7 +207,7 @@ class InceptionTest(tf.test.TestCase):
 is_training=False,
 reuse=True)
 predictions = tf.argmax(logits, 1)
-sess.run(tf.initialize_all_variables())
+sess.run(tf.global_variables_initializer())
 output = sess.run(predictions)
 self.assertEquals(output.shape, (eval_batch_size,))
...