Commit 99462f6d authored by Neal Wu, committed by GitHub
Browse files

Merge pull request #760 from stakemura/master

Python 3 support for some inception scripts
parents 4de34a4c 3e93722a
...@@ -247,7 +247,7 @@ def _process_image_files_batch(coder, thread_index, ranges, name, filenames, ...@@ -247,7 +247,7 @@ def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0 counter = 0
for s in xrange(num_shards_per_batch): for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010' # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
...@@ -300,7 +300,7 @@ def _process_image_files(name, filenames, texts, labels, num_shards): ...@@ -300,7 +300,7 @@ def _process_image_files(name, filenames, texts, labels, num_shards):
# Break all images into batches with a [ranges[i][0], ranges[i][1]]. # Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = [] ranges = []
for i in xrange(len(spacing) - 1): for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]]) ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch. # Launch a thread for each batch.
...@@ -314,7 +314,7 @@ def _process_image_files(name, filenames, texts, labels, num_shards): ...@@ -314,7 +314,7 @@ def _process_image_files(name, filenames, texts, labels, num_shards):
coder = ImageCoder() coder = ImageCoder()
threads = [] threads = []
for thread_index in xrange(len(ranges)): for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards) texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args) t = threading.Thread(target=_process_image_files_batch, args=args)
...@@ -386,7 +386,7 @@ def _find_image_files(data_dir, labels_file): ...@@ -386,7 +386,7 @@ def _find_image_files(data_dir, labels_file):
# Shuffle the ordering of all image files in order to guarantee # Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the # random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable. # saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames)) shuffled_index = list(range(len(filenames)))
random.seed(12345) random.seed(12345)
random.shuffle(shuffled_index) random.shuffle(shuffled_index)
......
...@@ -370,7 +370,7 @@ def _process_image_files_batch(coder, thread_index, ranges, name, filenames, ...@@ -370,7 +370,7 @@ def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0 counter = 0
for s in xrange(num_shards_per_batch): for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010' # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
...@@ -434,7 +434,7 @@ def _process_image_files(name, filenames, synsets, labels, humans, ...@@ -434,7 +434,7 @@ def _process_image_files(name, filenames, synsets, labels, humans,
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = [] ranges = []
threads = [] threads = []
for i in xrange(len(spacing) - 1): for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]]) ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch. # Launch a thread for each batch.
...@@ -448,7 +448,7 @@ def _process_image_files(name, filenames, synsets, labels, humans, ...@@ -448,7 +448,7 @@ def _process_image_files(name, filenames, synsets, labels, humans,
coder = ImageCoder() coder = ImageCoder()
threads = [] threads = []
for thread_index in xrange(len(ranges)): for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards) synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args) t = threading.Thread(target=_process_image_files_batch, args=args)
...@@ -524,7 +524,7 @@ def _find_image_files(data_dir, labels_file): ...@@ -524,7 +524,7 @@ def _find_image_files(data_dir, labels_file):
# Shuffle the ordering of all image files in order to guarantee # Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the # random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable. # saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames)) shuffled_index = list(range(len(filenames)))
random.seed(12345) random.seed(12345)
random.shuffle(shuffled_index) random.shuffle(shuffled_index)
......
...@@ -72,7 +72,7 @@ if __name__ == '__main__': ...@@ -72,7 +72,7 @@ if __name__ == '__main__':
os.makedirs(labeled_data_dir) os.makedirs(labeled_data_dir)
# Move all of the image to the appropriate sub-directory. # Move all of the image to the appropriate sub-directory.
for i in xrange(len(labels)): for i in range(len(labels)):
basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1) basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
original_filename = os.path.join(data_dir, basename) original_filename = os.path.join(data_dir, basename)
if not os.path.exists(original_filename): if not os.path.exists(original_filename):
......
...@@ -128,7 +128,7 @@ def ProcessXMLAnnotation(xml_file): ...@@ -128,7 +128,7 @@ def ProcessXMLAnnotation(xml_file):
num_boxes = FindNumberBoundingBoxes(root) num_boxes = FindNumberBoundingBoxes(root)
boxes = [] boxes = []
for index in xrange(num_boxes): for index in range(num_boxes):
box = BoundingBox() box = BoundingBox()
# Grab the 'index' annotation. # Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index) box.xmin = GetInt('xmin', root, index)
......
...@@ -229,7 +229,7 @@ def train(dataset): ...@@ -229,7 +229,7 @@ def train(dataset):
# Calculate the gradients for each model tower. # Calculate the gradients for each model tower.
tower_grads = [] tower_grads = []
reuse_variables = None reuse_variables = None
for i in xrange(FLAGS.num_gpus): for i in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i): with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope: with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
# Force all Variables to reside on the CPU. # Force all Variables to reside on the CPU.
...@@ -333,7 +333,7 @@ def train(dataset): ...@@ -333,7 +333,7 @@ def train(dataset):
FLAGS.train_dir, FLAGS.train_dir,
graph_def=sess.graph.as_graph_def(add_shapes=True)) graph_def=sess.graph.as_graph_def(add_shapes=True))
for step in xrange(FLAGS.max_steps): for step in range(FLAGS.max_steps):
start_time = time.time() start_time = time.time()
_, loss_value = sess.run([train_op, loss]) _, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time duration = time.time() - start_time
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment