Unverified Commit d9b28627 authored by Julien Plu, committed by GitHub

Fix TF Roberta for mixed precision training (#11675)

parent a135f595
@@ -541,7 +541,9 @@ class TFRobertaMainLayer(tf.keras.layers.Layer):
         # Since we are adding it to the raw scores before the softmax, this is
         # effectively the same as removing these entirely.
         extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
-        extended_attention_mask = tf.multiply(tf.subtract(1.0, extended_attention_mask), -10000.0)
+        one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+        ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+        extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
 
         # Prepare head mask if needed
         # 1.0 in head_mask indicate we keep the head
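A minimal sketch of the pattern this commit introduces, for illustration only: under tf.keras mixed precision the activations are float16, while bare Python literals such as 1.0 and -10000.0 would otherwise be materialized as float32 constants, producing a dtype mismatch in tf.subtract/tf.multiply. The fix pins both constants to embedding_output.dtype. The shapes and the toy attention_mask below are hypothetical stand-ins, not the model's real tensors.

```python
import tensorflow as tf

# Hypothetical float16 activations, as produced under
# tf.keras.mixed_precision.set_global_policy("mixed_float16").
embedding_output = tf.zeros((1, 4, 8), dtype=tf.float16)
attention_mask = tf.constant([[1, 1, 1, 0]])  # toy mask: last position padded

# Broadcastable [batch, 1, 1, seq_len] mask, cast to the activation dtype.
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)

# Match every constant to the activation dtype instead of relying on
# Python float literals, which default to float32.
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)

print(extended_attention_mask.dtype)  # float16: safe to add to float16 attention scores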