# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import numpy as np
import unittest
from openfold.model.msa import (
    MSARowAttentionWithPairBias,
    MSAColumnAttention,
    MSAColumnGlobalAttention,
)
from openfold.utils.tensor_utils import tree_map
import tests.compare_utils as compare_utils
from tests.config import consts

if compare_utils.alphafold_is_installed():
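    # AlphaFold, JAX, and Haiku are needed only for the comparison tests, so
    # they are imported only when an AlphaFold installation is available.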
    alphafold = compare_utils.import_alphafold()
    import jax
    import haiku as hk


class TestMSARowAttentionWithPairBias(unittest.TestCase):
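    # The module should return an MSA embedding with the same shape as its input.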
    def test_shape(self):
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c_z = consts.c_z
        c = 52
        no_heads = 4
        chunk_size = None

        mrapb = MSARowAttentionWithPairBias(c_m, c_z, c, no_heads)

        m = torch.rand((batch_size, n_seq, n_res, c_m))
        z = torch.rand((batch_size, n_res, n_res, c_z))

        shape_before = m.shape
        m = mrapb(m, z=z, chunk_size=chunk_size)
        shape_after = m.shape

        self.assertTrue(shape_before == shape_after)

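    # Run the AlphaFold (Haiku/JAX) reference module with pretrained weights and
    # check that the OpenFold module reproduces its output to within consts.eps.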
    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        def run_msa_row_att(msa_act, msa_mask, pair_act):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_row = alphafold.model.modules.MSARowAttentionWithPairBias(
                c_e.msa_row_attention_with_pair_bias, config.model.global_config
            )
            act = msa_row(msa_act=msa_act, msa_mask=msa_mask, pair_act=pair_act)
            return act

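        # hk.transform turns the Haiku module call into a pure function that
        # accepts explicit parameters.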
        f = hk.transform(run_msa_row_att)

        n_res = consts.n_res
        n_seq = consts.n_seq

        msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res)).astype(
            np.float32
        )
        pair_act = np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)

        # Fetch pretrained parameters (but only from one block)
        params = compare_utils.fetch_alphafold_module_weights(
            "alphafold/alphafold_iteration/evoformer/evoformer_iteration/"
            + "msa_row_attention"
        )
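        # Keep only the first block's weights from the stacked per-block parameters.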
        params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)

        out_gt = f.apply(
            params, None, msa_act, msa_mask, pair_act
        ).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))

        model = compare_utils.get_global_pretrained_openfold()
        out_repro = (
            model.evoformer.blocks[0].msa_att_row(
                torch.as_tensor(msa_act).cuda(),
                z=torch.as_tensor(pair_act).cuda(),
                chunk_size=4,
                mask=torch.as_tensor(msa_mask).cuda(),
            )
        ).cpu()

        self.assertTrue(torch.all(torch.abs(out_gt - out_repro) < consts.eps))


class TestMSAColumnAttention(unittest.TestCase):
    def test_shape(self):
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c = 44
        no_heads = 4

        msaca = MSAColumnAttention(c_m, c, no_heads)

        x = torch.rand((batch_size, n_seq, n_res, c_m))

        shape_before = x.shape
        x = msaca(x, chunk_size=None)
        shape_after = x.shape

        self.assertTrue(shape_before == shape_after)

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        def run_msa_col_att(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_col = alphafold.model.modules.MSAColumnAttention(
                c_e.msa_column_attention, config.model.global_config
            )
            act = msa_col(msa_act=msa_act, msa_mask=msa_mask)
            return act

        f = hk.transform(run_msa_col_att)

        n_res = consts.n_res
        n_seq = consts.n_seq

        msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res)).astype(
            np.float32
        )

        # Fetch pretrained parameters (but only from one block)
        params = compare_utils.fetch_alphafold_module_weights(
            "alphafold/alphafold_iteration/evoformer/evoformer_iteration/"
            + "msa_column_attention"
        )
        params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)

        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))

        model = compare_utils.get_global_pretrained_openfold()
        out_repro = (
            model.evoformer.blocks[0].msa_att_col(
                torch.as_tensor(msa_act).cuda(),
                chunk_size=4,
                mask=torch.as_tensor(msa_mask).cuda(),
            )
        ).cpu()

        print(torch.mean(torch.abs(out_gt - out_repro)))

        self.assertTrue(torch.all(torch.abs(out_gt - out_repro) < consts.eps))


class TestMSAColumnGlobalAttention(unittest.TestCase):
    def test_shape(self):
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c = 44
        no_heads = 4

        msagca = MSAColumnGlobalAttention(c_m, c, no_heads)

        x = torch.rand((batch_size, n_seq, n_res, c_m))

        shape_before = x.shape
        x = msagca(x, chunk_size=None)
        shape_after = x.shape

        self.assertTrue(shape_before == shape_after)

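    # The global column attention variant lives in the extra MSA stack, so its
    # weights and inputs use the extra-MSA channel dimension (consts.c_e).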
    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        def run_msa_col_global_att(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_col = alphafold.model.modules.MSAColumnGlobalAttention(
                c_e.msa_column_attention,
                config.model.global_config,
                name="msa_column_global_attention",
            )
            act = msa_col(msa_act=msa_act, msa_mask=msa_mask)
            return act

        f = hk.transform(run_msa_col_global_att)

        n_res = consts.n_res
        n_seq = consts.n_seq
        c_e = consts.c_e

        msa_act = np.random.rand(n_seq, n_res, c_e)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res))

        # Fetch pretrained parameters (but only from one block)
        params = compare_utils.fetch_alphafold_module_weights(
            "alphafold/alphafold_iteration/evoformer/extra_msa_stack/"
            + "msa_column_global_attention"
        )
        params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)

        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))

        model = compare_utils.get_global_pretrained_openfold()
        out_repro = (
            model.extra_msa_stack.blocks[0].msa_att_col(
                torch.as_tensor(msa_act, dtype=torch.float32).cuda(),
                chunk_size=4,
                mask=torch.as_tensor(msa_mask, dtype=torch.float32).cuda(),
            )
            .cpu()
        )

        self.assertTrue(torch.all(torch.abs(out_gt - out_repro) < consts.eps))


if __name__ == "__main__":
    unittest.main()