Commit afb2291b authored by Xin Pan, committed by GitHub

Merge pull request #1031 from tensorflow/update-resnet

Update resnet model README, API calls, and evaluation code
Parents: c4a96bc3, 64254ad3
@@ -93,6 +93,9 @@ bazel-bin/resnet/resnet_main --train_data_path=cifar10/data_batch* \
   --dataset='cifar10' \
   --num_gpus=1
 
+# While the model is training, you can also check on its progress using tensorboard:
+tensorboard --logdir=/tmp/resnet_model
+
 # Evaluate the model.
 # Avoid running on the same GPU as the training job at the same time,
 # otherwise, you might run out of memory.
...
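The README now points tensorboard at the same directory the training job writes to. As a rough illustration (not the repository's training code), event files appear under that directory whenever a `tf.summary.FileWriter` flushes serialized summaries; the scalar name and the tiny loop below are made up for the sketch, and `/tmp/resnet_model` is assumed to match the `--log_root` used above.

```python
import tensorflow as tf

log_dir = '/tmp/resnet_model'  # assumed to match --log_root / --logdir above

loss = tf.placeholder(tf.float32, name='loss')
loss_summary = tf.summary.scalar('loss', loss)  # serializes a Summary proto when run

summary_writer = tf.summary.FileWriter(log_dir)
with tf.Session() as sess:
  for step in range(3):
    summ = sess.run(loss_summary, feed_dict={loss: 1.0 / (step + 1)})
    summary_writer.add_summary(summ, step)  # one event record per step
summary_writer.flush()  # `tensorboard --logdir=/tmp/resnet_model` picks these up
```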
@@ -128,7 +128,6 @@ def evaluate(hps):
   best_precision = 0.0
   while True:
-    time.sleep(60)
     try:
       ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
     except tf.errors.OutOfRangeError as e:
@@ -163,13 +162,15 @@ def evaluate(hps):
         tag='Best Precision', simple_value=best_precision)
     summary_writer.add_summary(best_precision_summ, train_step)
     summary_writer.add_summary(summaries, train_step)
-    tf.logging.info('loss: %.3f, precision: %.3f, best precision: %.3f\n' %
+    tf.logging.info('loss: %.3f, precision: %.3f, best precision: %.3f' %
                     (loss, precision, best_precision))
     summary_writer.flush()
 
     if FLAGS.eval_once:
       break
+
+    time.sleep(60)
 
 
 def main(_):
   if FLAGS.num_gpus == 0:
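Taken together, the two hunks above move the 60-second sleep from the top of the evaluation loop to the bottom. Below is a simplified sketch of that polling pattern, not the repository's evaluate(); `run_eval` is a stand-in callable for the checkpoint restore plus the eval pass. With the sleep at the end, the first evaluation starts immediately and `--eval_once` exits without waiting.

```python
import time
import tensorflow as tf

def poll_and_evaluate(log_root, run_eval, eval_once=False):
  """Re-evaluate the newest checkpoint under log_root until told to stop."""
  while True:
    ckpt_state = tf.train.get_checkpoint_state(log_root)
    if ckpt_state and ckpt_state.model_checkpoint_path:
      run_eval(ckpt_state.model_checkpoint_path)  # restore + eval, supplied by caller
    if eval_once:
      break
    time.sleep(60)  # wait for the training job to write a newer checkpoint
```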
@@ -207,4 +208,5 @@ def main(_):
 
 
 if __name__ == '__main__':
+  tf.logging.set_verbosity(tf.logging.INFO)
   tf.app.run()
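The verbosity added in this hunk is what makes the tf.logging.info calls above visible on the console. A quick sketch of how the threshold interacts with the per-unit shape message that the model code demotes to debug further below (the numbers and shape are placeholders):

```python
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('loss: %.3f, precision: %.3f', 0.521, 0.913)  # printed at INFO
tf.logging.debug('image after unit %s', (128, 16, 32, 32))    # suppressed at INFO

tf.logging.set_verbosity(tf.logging.DEBUG)
tf.logging.debug('image after unit %s', (128, 16, 32, 32))    # printed at DEBUG
```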
@@ -183,8 +183,8 @@ class ResNet(object):
             'moving_variance', params_shape, tf.float32,
             initializer=tf.constant_initializer(1.0, tf.float32),
             trainable=False)
-        tf.histogram_summary(mean.op.name, mean)
-        tf.histogram_summary(variance.op.name, variance)
+        tf.summary.histogram(mean.op.name, mean)
+        tf.summary.histogram(variance.op.name, variance)
       # elipson used to be 1e-5. Maybe 0.001 solves NaN problem in deeper net.
       y = tf.nn.batch_normalization(
           x, mean, variance, beta, gamma, 0.001)
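This hunk migrates the batch-norm summaries to the TF 1.0 summary API: tf.histogram_summary was deprecated in 0.12 and removed in 1.0 in favor of tf.summary.histogram. A minimal sketch of the new call, using a stand-in variable rather than the model's moving statistics:

```python
import tensorflow as tf

# Stand-in for the model's moving_mean; shape and name are illustrative only.
moving_mean = tf.Variable(tf.zeros([16]), trainable=False, name='moving_mean')

# Old, pre-1.0 call (no longer available):
#   tf.histogram_summary(moving_mean.op.name, moving_mean)
# New-style summary op, picked up by tf.summary.merge_all():
tf.summary.histogram(moving_mean.op.name, moving_mean)
```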
@@ -221,7 +221,7 @@ class ResNet(object):
                      [(out_filter-in_filter)//2, (out_filter-in_filter)//2]])
       x += orig_x
 
-    tf.logging.info('image after unit %s', x.get_shape())
+    tf.logging.debug('image after unit %s', x.get_shape())
     return x
 
   def _bottleneck_residual(self, x, in_filter, out_filter, stride,
@@ -265,7 +265,7 @@ class ResNet(object):
     for var in tf.trainable_variables():
       if var.op.name.find(r'DW') > 0:
         costs.append(tf.nn.l2_loss(var))
-        # tf.histogram_summary(var.op.name, var)
+        # tf.summary.histogram(var.op.name, var)
 
     return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))
...
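For context, the return line above is the weight-decay term: the L2 norms of every trainable variable whose name contains 'DW' (the convolution kernels), summed and scaled by the decay rate. A small illustrative sketch; the variable name and the 0.0002 rate are assumptions, not values taken from the diff:

```python
import tensorflow as tf

weight_decay_rate = 0.0002  # assumed value, for illustration only

# Stand-in convolution kernel; the model names its kernels 'DW'.
tf.get_variable('unit_1/DW', [3, 3, 16, 16],
                initializer=tf.truncated_normal_initializer(stddev=0.1))

costs = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
         if v.op.name.find(r'DW') > 0]
decay_term = tf.multiply(weight_decay_rate, tf.add_n(costs))
```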