# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F

from fairseq import utils


class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """
    def __init__(self, embed_dim, num_heads, dropout=0., bias=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
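        # scale queries by 1/sqrt(head_dim), as in "Attention Is All You Need",
        # so dot-product magnitudes stay stable as the head dimension grows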
        self.scaling = self.head_dim**-0.5
        self._mask = None

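        # the Q, K and V projections are packed into one (3*embed_dim, embed_dim)
        # weight so a single matmul computes all three; rows [0:embed_dim) project
        # queries, [embed_dim:2*embed_dim) keys, the rest values (see _in_proj)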
        self.in_proj_weight = Parameter(torch.Tensor(3*embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3*embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)

    def forward(self, query, key, value, mask_future_timesteps=False,
                key_padding_mask=None, incremental_state=None,
                need_weights=True, static_kv=False):
        """Input shape: Time x Batch x Channel

        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """

        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert kv_same and not qkv_same
                    key = value = None
        else:
            saved_state = None

        if qkv_same:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif kv_same:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                # this will allow us to concat it with the previous value
                # and get just the previous value
                k = v = q.new(0)
            else:
                k, v = self.in_proj_kv(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling

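        # incremental decoding: concat the newly projected key/value with the
        # cached ones so earlier timesteps are never re-projected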
        if saved_state is not None:
            if 'prev_key' in saved_state:
                k = torch.cat((saved_state['prev_key'], k), dim=0)
            if 'prev_value' in saved_state:
                v = torch.cat((saved_state['prev_value'], v), dim=0)
            saved_state['prev_key'] = k
            saved_state['prev_value'] = v
            self._set_input_buffer(incremental_state, saved_state)

        src_len = k.size(0)

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

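        # fold heads into the batch dimension: (len, bsz, embed_dim) ->
        # (bsz*num_heads, len, head_dim), so one bmm handles every head at once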
        q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
        k = k.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
        v = v.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        # only apply masking at training time (when incremental state is None)
        if mask_future_timesteps and incremental_state is None:
            assert query.size() == key.size(), \
                'mask_future_timesteps only applies to self-attention'
            attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.float().masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            ).type_as(attn_weights)  # FP16 support: cast to float and back
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
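        # compute softmax in float32 and cast back (FP16 support, as above)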
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)

        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)

        if need_weights:
            # average attention weights over heads
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.sum(dim=1) / self.num_heads
        else:
            attn_weights = None

        return attn, attn_weights

    def in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_kv(self, key):
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)

    def in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)

    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2*self.embed_dim)

    def in_proj_v(self, value):
        return self._in_proj(value, start=2*self.embed_dim)

    def _in_proj(self, input, start=None, end=None):
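        # slice the packed projection: rows [start:end) of the shared weight and
        # bias select the query, key and/or value portion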
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        if end is not None:
            weight = weight[:end, :]
            if bias is not None:
                bias = bias[:end]
        if start is not None:
            weight = weight[start:, :]
            if bias is not None:
                bias = bias[start:]
        return F.linear(input, weight, bias)

    def buffered_mask(self, tensor):
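        # lazily build and cache an upper-triangular -inf mask over future
        # timesteps, growing the cached buffer if a longer sequence arrives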
        dim = tensor.size(-1)
        if self._mask is None:
            self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._mask.size(0) < dim:
            self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)
        return self._mask[:dim, :dim]

    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer[k] = input_buffer[k].index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
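

if __name__ == '__main__':
    # Minimal self-attention usage sketch (an illustrative addition, not part
    # of the original module). Shapes follow forward()'s documented
    # Time x Batch x Channel layout; the sizes below are arbitrary.
    mha = MultiheadAttention(embed_dim=16, num_heads=4, dropout=0.1)
    x = torch.rand(5, 2, 16)  # (tgt_len=5, bsz=2, embed_dim=16)
    # passing the same tensor as query, key and value gives self-attention;
    # mask_future_timesteps=True applies the causal (upper-triangular) mask
    out, avg_weights = mha(x, x, x, mask_future_timesteps=True)
    print(out.size())          # torch.Size([5, 2, 16])
    print(avg_weights.size())  # torch.Size([2, 5, 5]), averaged over heads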