Commit 996909d1 authored by Neal Wu, committed by GitHub

Merge pull request #1042 from jo8hua/initializer_update

Updated deprecated tf.initialize_all_variables() with tf.global_variables_initializer()
parents afdcf7d4 31f1af58
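
The change below is a one-line API migration applied across several model test files and examples: tf.initialize_all_variables() was deprecated in TensorFlow in favor of tf.global_variables_initializer(), which builds the same variable-initialization op. As a minimal sketch of the pattern (not taken from the changed files, and assuming the TensorFlow 1.x graph/session API these tests use):

import tensorflow as tf

# Build a graph with at least one variable that needs initializing.
x = tf.Variable(tf.zeros([2, 2]), name='x')

with tf.Session() as sess:
  # Deprecated:   sess.run(tf.initialize_all_variables())
  # Replacement used throughout this commit:
  sess.run(tf.global_variables_initializer())
  print(sess.run(x))  # variables are now initialized
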
@@ -137,7 +137,7 @@ class OverFeatTest(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
@@ -104,7 +104,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
@@ -145,7 +145,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = y2_expected
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
@@ -240,7 +240,7 @@ class ResnetUtilsTest(tf.test.TestCase):
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
@@ -388,7 +388,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
@@ -407,7 +407,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
@@ -422,7 +422,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
@@ -441,7 +441,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
@@ -104,7 +104,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
@@ -145,7 +145,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = y2_expected
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
@@ -240,7 +240,7 @@ class ResnetUtilsTest(tf.test.TestCase):
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
@@ -390,7 +390,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
expected, _ = self._resnet_small(inputs, None,
is_training=False,
global_pool=False)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
@@ -409,7 +409,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
@@ -425,7 +425,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
@@ -444,7 +444,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
@@ -148,7 +148,7 @@ class VGGATest(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
@@ -292,7 +292,7 @@ class VGG16Test(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
@@ -447,7 +447,7 @@ class VGG19Test(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
@@ -300,7 +300,7 @@
" # This is good for training, but not for testing.\n",
" total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True)\n",
" \n",
" init_op = tf.initialize_all_variables()\n",
" init_op = tf.global_variables_initializer()\n",
" \n",
" with tf.Session() as sess:\n",
" sess.run(init_op) # Will initialize the parameters with random weights.\n",
@@ -568,7 +568,7 @@
" probabilities = tf.nn.softmax(logits)\n",
" \n",
" # Initialize all the variables (including parameters) randomly.\n",
" init_op = tf.initialize_all_variables()\n",
" init_op = tf.global_variables_initializer()\n",
" \n",
" with tf.Session() as sess:\n",
" # Run the init_op, evaluate the model outputs and print the results:\n",
@@ -180,7 +180,7 @@ class VgslModelTest(tf.test.TestCase):
filename,
model_spec='4,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfxs16]O0s12',
mode='train')
- tf.initialize_all_variables().run(session=sess)
+ tf.global_variables_initializer().run(session=sess)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
_, step = model.TrainAStep(sess)
@@ -204,7 +204,7 @@ class VgslModelTest(tf.test.TestCase):
filename,
model_spec='2,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lbx100]O1c105',
mode='train')
- tf.initialize_all_variables().run(session=sess)
+ tf.global_variables_initializer().run(session=sess)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
_, step = model.TrainAStep(sess)
@@ -228,7 +228,7 @@ class VgslModelTest(tf.test.TestCase):
filename,
model_spec='8,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfx64 Lrx64 Lfx64]O1s12',
mode='train')
- tf.initialize_all_variables().run(session=sess)
+ tf.global_variables_initializer().run(session=sess)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
_, step = model.TrainAStep(sess)
@@ -55,7 +55,7 @@ class VgslspecsTest(tf.test.TestCase):
target_widths = tf.div(self.in_widths, factor).eval()
target_heights = tf.div(self.in_heights, factor).eval()
# Run with the 'real' data.
- tf.initialize_all_variables().run()
+ tf.global_variables_initializer().run()
res_image, res_widths, res_heights = sess.run(
[outputs, vgsl.GetLengths(2), vgsl.GetLengths(1)],
feed_dict={self.ph_image: self.in_image,
@@ -135,7 +135,7 @@ accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# %% We now create a new session to actually perform the initialization of the
# variables:
sess = tf.Session()
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
# %% We'll now train in minibatches and report accuracy, loss:
@@ -55,7 +55,7 @@ with tf.variable_scope('spatial_transformer_0'):
# %% Run session
sess = tf.Session()
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
y = sess.run(h_trans, feed_dict={x: batch})
# plt.imshow(y[0])
@@ -207,7 +207,7 @@ def main(unused_argv):
saver.restore(sess, FLAGS.pretrained_model)
tf.train.start_queue_runners(sess)
- sess.run(tf.initialize_all_variables())
+ sess.run(tf.global_variables_initializer())
tf.logging.info('iteration number, cost')