Commit 7498acc6 authored by Xin Pan, committed by GitHub

Merge pull request #377 from kaiix/textsum-multigpu

Fix running textsum on a single GPU
parents 0f904081 a0de5ca9
@@ -94,7 +94,8 @@ def _Train(model, data_batcher):
                              save_summaries_secs=60,
                              save_model_secs=FLAGS.checkpoint_secs,
                              global_step=model.global_step)
-    sess = sv.prepare_or_wait_for_session()
+    sess = sv.prepare_or_wait_for_session(config=tf.ConfigProto(
+        allow_soft_placement=True))
     running_avg_loss = 0
     step = 0
     while not sv.should_stop() and step < FLAGS.max_run_steps:
......
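For context on the first hunk: a minimal sketch, assuming the TensorFlow 1.x API this model targets, of what allow_soft_placement changes. Without it, an op pinned to a GPU index that is not present on the machine makes session setup fail; with it, TensorFlow places the op on an available device instead.

# Minimal sketch (assumed TensorFlow 1.x API, matching the code above).
import tensorflow as tf

with tf.device('/gpu:1'):              # may not exist on a single-GPU machine
  x = tf.constant([1.0, 2.0]) * 2.0

# Without allow_soft_placement, running this on a machine lacking /gpu:1
# raises an InvalidArgumentError; with it, the op falls back to an
# available device (e.g. /gpu:0 or the CPU).
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
  print(sess.run(x))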
@@ -105,7 +105,8 @@ class Seq2SeqAttentionModel(object):
     if self._num_gpus == 0:
       return ''
     dev = '/gpu:%d' % self._cur_gpu
-    self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)
+    if self._num_gpus > 1:
+      self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)
     return dev
 
   def _get_gpu(self, gpu_id):
......
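For context on the second hunk: the added guard avoids a modulo-by-zero. With a single GPU, self._num_gpus - 1 is 0, and (self._cur_gpu + 1) % 0 raises ZeroDivisionError while the graph is being built. A standalone sketch of the same round-robin device cycling (hypothetical function name, not part of the model):

def next_device(cur_gpu, num_gpus):
  """Round-robin device chooser mirroring the patched logic above."""
  if num_gpus == 0:
    return '', cur_gpu                 # CPU-only run: no device string
  dev = '/gpu:%d' % cur_gpu
  if num_gpus > 1:                     # with 1 GPU, num_gpus - 1 == 0, so skip the modulo
    cur_gpu = (cur_gpu + 1) % (num_gpus - 1)
  return dev, cur_gpu

# Example: on a single-GPU machine every call returns '/gpu:0' and cur_gpu
# stays at 0, instead of raising ZeroDivisionError as the old code did.
dev, cur = next_device(0, 1)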