"git@developer.sourcefind.cn:OpenDAS/autoawq.git" did not exist on "2fa3a5d1a1ab2019321e898add3fcbe4898bf8cb"
Unverified Commit 2aec950c authored by Mark Daoust's avatar Mark Daoust Committed by GitHub
Browse files

Merge pull request #5058 from raymond-yuan/patch-1

minor bug fix
parents d988d710 e9dbef6b
@@ -347,13 +347,11 @@ class Worker(threading.Thread):
         value_loss = advantage ** 2
         # Calculate our policy loss
-        actions_one_hot = tf.one_hot(memory.actions, self.action_size, dtype=tf.float32)
         policy = tf.nn.softmax(logits)
-        entropy = tf.reduce_sum(policy * tf.log(policy + 1e-20), axis=1)
-        policy_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=actions_one_hot,
-                                                                 logits=logits)
+        entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=policy, logits=logits)
+        policy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=memory.actions,
+                                                                     logits=logits)
         policy_loss *= tf.stop_gradient(advantage)
         policy_loss -= 0.01 * entropy
         total_loss = tf.reduce_mean((0.5 * value_loss + policy_loss))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment