Unverified Commit 6c560cb3 authored by josh11b, committed by GitHub

AllReduceCrossTowerOps -> AllReduceCrossDeviceOps

parent 780f5265
@@ -27,7 +27,7 @@ def get_distribution_strategy(num_gpus, all_reduce_alg=None):
   Args:
     num_gpus: Number of GPUs to run this model.
     all_reduce_alg: Specify which algorithm to use when performing all-reduce.
-      See tf.contrib.distribute.AllReduceCrossTowerOps for available algorithms.
+      See tf.contrib.distribute.AllReduceCrossDeviceOps for available algorithms.
       If None, DistributionStrategy will choose based on device topology.
   Returns:
@@ -41,7 +41,7 @@ def get_distribution_strategy(num_gpus, all_reduce_alg=None):
   if all_reduce_alg:
     return tf.contrib.distribute.MirroredStrategy(
         num_gpus=num_gpus,
-        cross_tower_ops=tf.contrib.distribute.AllReduceCrossTowerOps(
+        cross_tower_ops=tf.contrib.distribute.AllReduceCrossDeviceOps(
            all_reduce_alg, num_packs=2))
   else:
     return tf.contrib.distribute.MirroredStrategy(num_gpus=num_gpus)
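For context, a minimal usage sketch of what the updated branch builds when `all_reduce_alg` is set. It assumes the TF 1.x `tf.contrib.distribute` API shown in the diff; `"nccl"` is used here as an assumed example algorithm name, and passing the strategy through `tf.estimator.RunConfig(train_distribute=...)` is one typical way such a strategy is consumed, not something this commit itself adds.

```python
import tensorflow as tf

# Equivalent of get_distribution_strategy(num_gpus=2, all_reduce_alg="nccl"):
# a MirroredStrategy whose cross-device communication uses the renamed
# AllReduceCrossDeviceOps class. "nccl" is an assumed algorithm name.
strategy = tf.contrib.distribute.MirroredStrategy(
    num_gpus=2,
    cross_tower_ops=tf.contrib.distribute.AllReduceCrossDeviceOps(
        "nccl", num_packs=2))

# The strategy is then typically handed to an Estimator via RunConfig so
# training is mirrored across the GPUs (TF 1.x train_distribute argument).
run_config = tf.estimator.RunConfig(train_distribute=strategy)
```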