"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "9322576e2f49d1014fb0c00a7a7c8c34b6a5fd35"
Unverified commit 439e7abd, authored by Hwijeen Ahn, committed by GitHub

use float 16 in causal mask and masked bias (#13194)

parent 8be921f9
@@ -157,11 +157,11 @@ def convert_megatron_checkpoint(args, input_state_dict, config):
         ) and weight_or_bias == "weight":
 
             # Insert a tensor of 1x1xDxD bias.
-            causal_mask = torch.tril(torch.ones((n_embed, n_embed), dtype=torch.uint8)).view(1, 1, n_embed, n_embed)
+            causal_mask = torch.tril(torch.ones((n_embed, n_embed), dtype=torch.float16)).view(1, 1, n_embed, n_embed)
             output_state_dict[layer_name + ".attn.bias"] = causal_mask
 
             # Insert a "dummy" tensor for masked_bias.
-            masked_bias = torch.tensor(-1e4)
+            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
             output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
 
         out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
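As a minimal sketch (not part of the commit), the snippet below illustrates why float16 is a safe dtype for these two buffers: the masking constant -1e4 is exactly representable in fp16 (whose max magnitude is about 65504), while a larger constant such as -1e9 would overflow to -inf. The tiny n_embed value here is a stand-in for illustration only.

import torch

# -1e4 fits comfortably within fp16 range and is exactly representable.
masked_bias = torch.tensor(-1e4, dtype=torch.float16)
print(masked_bias)  # tensor(-10000., dtype=torch.float16)

# A larger fp32-style masking constant like -1e9 overflows fp16 to -inf.
print(torch.tensor(-1e9, dtype=torch.float16))  # tensor(-inf, dtype=torch.float16)

# The causal mask is just a lower-triangular 0/1 pattern; storing it in
# fp16 keeps its dtype consistent with an fp16 checkpoint's other tensors.
n_embed = 4  # hypothetical tiny size for illustration
causal_mask = torch.tril(torch.ones((n_embed, n_embed), dtype=torch.float16)).view(1, 1, n_embed, n_embed)
print(causal_mask.shape)  # torch.Size([1, 1, 4, 4])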