"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "215db688da1661312884048cbe290910416ff616"
Commit 15a2fc88 authored by Rémi Louf

add General attention classes

The modifications I introduced in a previous commit broke Bert's
internal API. I reverted those changes and added more general classes
to handle the encoder-decoder attention case.

There may be a more elegant way to handle backward compatibility (I am
not comfortable with the current state of the code), but I cannot see it
right now.
parent cd6a59d5
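
A minimal usage sketch of the two call patterns this change separates: ordinary self-attention through BertAttention, and encoder-decoder attention through the new BertDecoderAttention. This is only an illustration of the intent, not code from the commit: the BertConfig-like config object, the tensor shapes, and the assumption that BertAttention is wired to the restored BertSelfAttention are mine.

import torch

# BertAttention and BertDecoderAttention are the classes from the modified
# modeling file in the diff below; config stands in for a BertConfig-like
# object exposing hidden_size, num_attention_heads, etc.
config = BertConfig()

encoder_states = torch.rand(8, 128, config.hidden_size)  # (batch, source_len, hidden)
decoder_states = torch.rand(8, 64, config.hidden_size)   # (batch, target_len, hidden)

# Self-attention keeps the original single-tensor API: query, key and value
# are all derived from the same hidden states.
self_attention = BertAttention(config)
self_attention_output = self_attention(decoder_states)[0]

# Encoder-decoder attention takes query, key and value explicitly: the query
# comes from the decoder side, key and value from the encoder output.
cross_attention = BertDecoderAttention(config)
cross_attention_output = cross_attention(query=self_attention_output,
                                         key=encoder_states,
                                         value=encoder_states)[0]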
@@ -174,9 +174,9 @@ class BertEmbeddings(nn.Module):
         return embeddings
 
 
-class BertSelfAttention(nn.Module):
+class BertGeneralAttention(nn.Module):
     def __init__(self, config):
-        super(BertSelfAttention, self).__init__()
+        super(BertGeneralAttention, self).__init__()
         if config.hidden_size % config.num_attention_heads != 0:
             raise ValueError(
                 "The hidden size (%d) is not a multiple of the number of attention "
@@ -235,6 +235,67 @@ class BertSelfAttention(nn.Module):
         return outputs
 
 
+class BertSelfAttention(nn.Module):
+    def __init__(self, config):
+        super(BertSelfAttention, self).__init__()
+        if config.hidden_size % config.num_attention_heads != 0:
+            raise ValueError(
+                "The hidden size (%d) is not a multiple of the number of attention "
+                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
+        self.output_attentions = config.output_attentions
+
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        self.key = nn.Linear(config.hidden_size, self.all_head_size)
+        self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+    def transpose_for_scores(self, x):
+        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+        x = x.view(*new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward(self, hidden_states, attention_mask=None, head_mask=None):
+        mixed_query_layer = self.query(hidden_states)
+        mixed_key_layer = self.key(hidden_states)
+        mixed_value_layer = self.value(hidden_states)
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+        key_layer = self.transpose_for_scores(mixed_key_layer)
+        value_layer = self.transpose_for_scores(mixed_value_layer)
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+        if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in BertModel forward() function)
+            attention_scores = attention_scores + attention_mask
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs = self.dropout(attention_probs)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs = attention_probs * head_mask
+
+        context_layer = torch.matmul(attention_probs, value_layer)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = context_layer.view(*new_context_layer_shape)
+
+        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
+        return outputs
+
+
 class BertSelfOutput(nn.Module):
     def __init__(self, config):
         super(BertSelfOutput, self).__init__()
@@ -279,12 +340,49 @@ class BertAttention(nn.Module):
         self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
         self.pruned_heads = self.pruned_heads.union(heads)
 
-    def forward(self, query_tensor, key_tensor, value_tensor, attention_mask=None, head_mask=None):
-        self_outputs = self.self(query_tensor, key_tensor, value_tensor, attention_mask, head_mask)
+    def forward(self, hidden_states, attention_mask=None, head_mask=None):
+        self_outputs = self.self(hidden_states, attention_mask, head_mask)
+        attention_output = self.output(self_outputs[0], hidden_states)
+        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
+        return outputs
+
+
+class BertDecoderAttention(nn.Module):
+    def __init__(self, config):
+        super(BertDecoderAttention, self).__init__()
+        self.self = BertGeneralAttention(config)
+        self.output = BertSelfOutput(config)
+        self.pruned_heads = set()
+
+    def prune_heads(self, heads):
+        if len(heads) == 0:
+            return
+        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
+        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
+        for head in heads:
+            # Compute how many pruned heads are before the head and move the index accordingly
+            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
+            mask[head] = 0
+        mask = mask.view(-1).contiguous().eq(1)
+        index = torch.arange(len(mask))[mask].long()
+
+        # Prune linear layers
+        self.self.query = prune_linear_layer(self.self.query, index)
+        self.self.key = prune_linear_layer(self.self.key, index)
+        self.self.value = prune_linear_layer(self.self.value, index)
+        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+        # Update hyper params and store pruned heads
+        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def forward(self, query, key, value, attention_mask=None, head_mask=None):
+        self_outputs = self.self(query, key, value, attention_mask, head_mask)
         # in encoder-decoder attention we use the output of the previous decoder stage as the query
         # in the Multi-Head Attention. We thus pass query_tensor as the residual in BertOutput.
         # This shows the limits of the current code architecture, which may benefit from some refactoring.
-        attention_output = self.output(self_outputs[0], query_tensor)
+        attention_output = self.output(self_outputs[0], query)
         outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
         return outputs
@@ -326,11 +424,7 @@ class BertEncoderLayer(nn.Module):
         self.output = BertOutput(config)
 
     def forward(self, hidden_states, attention_mask=None, head_mask=None):
-        attention_outputs = self.attention(query_tensor=hidden_states,
-                                           key_tensor=hidden_states,
-                                           value_tensor=hidden_states,
-                                           attention_mask=attention_mask,
-                                           head_mask=head_mask)
+        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
         attention_output = attention_outputs[0]
         intermediate_output = self.intermediate(attention_output)
         layer_output = self.output(intermediate_output, attention_output)
@@ -342,20 +436,16 @@ class BertDecoderLayer(nn.Module):
     def __init__(self, config):
         super(BertDecoderLayer, self).__init__()
         self.self_attention = BertAttention(config)
-        self.attention = BertAttention(config)
+        self.attention = BertDecoderAttention(config)
         self.intermediate = BertIntermediate(config)
         self.output = BertOutput(config)
 
     def forward(self, hidden_states, encoder_outputs, attention_mask=None, head_mask=None):
-        self_attention_outputs = self.self_attention(query_tensor=hidden_states,
-                                                     key_tensor=hidden_states,
-                                                     value_tensor=hidden_states,
-                                                     attention_mask=attention_mask,
-                                                     head_mask=head_mask)
+        self_attention_outputs = self.self_attention(hidden_states, attention_mask, head_mask)
         self_attention_output = self_attention_outputs[0]
-        attention_outputs = self.attention(query_tensor=self_attention_output,
-                                           key_tensor=encoder_outputs,
-                                           value_tensor=encoder_outputs,
+        attention_outputs = self.attention(query=self_attention_output,
+                                           key=encoder_outputs,
+                                           value=encoder_outputs,
                                            attention_mask=attention_mask,
                                            head_mask=head_mask)
         attention_output = attention_outputs[0]
@@ -399,10 +489,34 @@ class BertEncoder(nn.Module):
 class BertDecoder(nn.Module):
     def __init__(self, config):
-        raise NotImplementedError
+        super(BertDecoder, self).__init__()
+        self.output_attentions = config.output_attentions
+        self.output_hidden_states = config.output_hidden_states
+        self.layers = nn.ModuleList([BertDecoderLayer(config) for _ in range(config.num_hidden_layers)])
 
-    def forward(self, encoder_output):
-        raise NotImplementedError
+    def forward(self, hidden_states, encoder_outputs, attention_mask=None, head_mask=None):
+        all_hidden_states = ()
+        all_attentions = ()
+        for i, layer_module in enumerate(self.layers):
+            if self.output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            layer_outputs = layer_module(hidden_states, encoder_outputs, attention_mask, head_mask[i])
+            if self.output_attentions:
+                all_attentions = all_attentions + (layer_outputs[1],)
+            hidden_states = layer_outputs[0]
+
+        # Add last layer
+        if self.output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states,)
+
+        outputs = (hidden_states,)
+        if self.output_hidden_states:
+            outputs = outputs + (all_hidden_states,)
+        if self.output_attentions:
+            outputs = outputs + (all_attentions,)
+        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
 class BertPooler(nn.Module):
...
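
For orientation only (not part of the commit): a shape-level sketch of how the decoder pieces introduced above are meant to compose. The config object, the tensor shapes, and the list-of-None head mask are assumptions; in practice BertModel prepares these inputs.

import torch

config = BertConfig()  # hypothetical config exposing hidden_size, num_hidden_layers, ...

encoder_states = torch.rand(8, 128, config.hidden_size)  # encoder output (batch, source_len, hidden)
decoder_states = torch.rand(8, 64, config.hidden_size)   # decoder input  (batch, target_len, hidden)

# One decoder block: self-attention over the decoder states, then
# encoder-decoder attention against the encoder output.
layer = BertDecoderLayer(config)
layer_output = layer(decoder_states, encoder_states)[0]

# The full stack; BertDecoder.forward indexes head_mask per layer, so pass a
# list of Nones when no heads are masked.
decoder = BertDecoder(config)
head_mask = [None] * config.num_hidden_layers
decoder_outputs = decoder(decoder_states, encoder_states, head_mask=head_mask)
last_hidden_state = decoder_outputs[0]  # (batch, target_len, hidden)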