Commit 3e0fa932 authored by xinliupitt's avatar xinliupitt
Browse files

whitespace

parent d39321b1
...@@ -521,7 +521,7 @@ class CachedAttention(MultiHeadAttention): ...@@ -521,7 +521,7 @@ class CachedAttention(MultiHeadAttention):
if cache: if cache:
key, value = self._update_cache(key, value, cache, decode_loop_step) key, value = self._update_cache(key, value, cache, decode_loop_step)
query = tf.multiply(query,1.0 / math.sqrt(float(self._key_size))) query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_size)))
# Take the dot product between "query" and "key" to get the raw # Take the dot product between "query" and "key" to get the raw
# attention scores. # attention scores.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment