# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F

from fairseq import utils


class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
                 add_bias_kv=False, add_zero_attn=False, self_attention=False,
                 encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
                                                             'value to be of the same size'

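        # If query, key and value all have the same dimension, a single fused
        # in-projection weight (3 * embed_dim rows) is allocated; otherwise
        # separate projection weights are kept for k, v and q.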
        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))

        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)

        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.reset_parameters()

        self.onnx_trace = False

        # Prefer PyTorch's fused multi_head_attention_forward fast path when
        # this version of torch.nn.functional provides it.
        self.enable_torch_version = hasattr(F, "multi_head_attention_forward")

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def reset_parameters(self):
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(self, query, key, value, key_padding_mask=None, incremental_state=None,
                need_weights=True, static_kv=False, attn_mask=None):
        """Input shape: Time x Batch x Channel

        Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

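        # Fast path: delegate to PyTorch's fused multi_head_attention_forward
        # when it is available. It is bypassed during ONNX export and during
        # incremental decoding, which need the cached-state handling below.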
        if self.enable_torch_version and not self.onnx_trace and incremental_state is None and not static_kv:
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      self.in_proj_weight,
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask)
            else:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      torch.empty([0]),
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask, use_separate_proj_weight=True,
                                                      q_proj_weight=self.q_proj_weight,
                                                      k_proj_weight=self.k_proj_weight,
                                                      v_proj_weight=self.v_proj_weight)

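        # Incremental decoding: look up keys/values cached from previous time
        # steps. With static (encoder-side) keys/values there is nothing new to
        # project, so key/value are dropped here and the cache is reused below.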
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)

        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling

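        # Optionally append the learned bias_k/bias_v vectors as an extra key/
        # value position, extending the attention and padding masks to match.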
        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)

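        # Fold the head dimension into the batch dimension so every head is
        # handled by one batched matmul:
        # (len, bsz, embed_dim) -> (bsz * num_heads, len, head_dim).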
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)

            self._set_input_buffer(incremental_state, saved_state)

        src_len = k.size(1)

        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

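        # Optionally append an all-zero key/value position so attention has a
        # slot it can attend to without contributing to the output; the masks
        # are extended by one column to match.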
        if self.add_zero_attn:
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)

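        # Raw attention scores of shape (bsz * num_heads, tgt_len, src_len);
        # q was already scaled by head_dim ** -0.5 above.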
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)

        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask

        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            if self.onnx_trace:
                attn_weights = torch.where(
                    key_padding_mask.unsqueeze(1).unsqueeze(2),
                    torch.Tensor([float("-Inf")]),
                    attn_weights.float()
                ).type_as(attn_weights)
            else:
                attn_weights = attn_weights.masked_fill(
                    key_padding_mask.unsqueeze(1).unsqueeze(2),
                    float('-inf'),
                )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = utils.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace,
        ).type_as(attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)

        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if (self.onnx_trace and attn.size(1) == 1):
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)

        if need_weights:
            # average attention weights over heads
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.sum(dim=1) / self.num_heads
        else:
            attn_weights = None

        return attn, attn_weights

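    # The in_proj_* helpers compute the query/key/value projections, slicing
    # the fused in_proj_weight when q/k/v share the same dimension and using
    # the separate per-projection weights otherwise.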
    def in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_q(self, query):
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[:self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)

    def in_proj_k(self, key):
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim:2 * self.embed_dim]
            return F.linear(key, weight, bias)

    def in_proj_v(self, value):
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim:]
            return F.linear(value, weight, bias)

    def _in_proj(self, input, start=0, end=None):
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer[k] = input_buffer[k].index_select(0, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )

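    # No-op hook by default; subclasses can override it to mask attention
    # weights (e.g. sparse attention variants) before the softmax is applied.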
    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        return attn_weights
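

# Minimal usage sketch (not part of the original module): assumes fairseq and a
# recent PyTorch are importable, and runs one self-attention forward pass on
# random inputs to illustrate the Time x Batch x Channel input convention.
if __name__ == '__main__':
    tgt_len, bsz, embed_dim, num_heads = 5, 2, 16, 4
    attention = MultiheadAttention(embed_dim, num_heads, self_attention=True)
    x = torch.rand(tgt_len, bsz, embed_dim)  # Time x Batch x Channel
    attn, attn_weights = attention(x, x, x, need_weights=True)
    print(attn.shape)          # torch.Size([5, 2, 16])
    print(attn_weights.shape)  # torch.Size([2, 5, 5]), averaged over heads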