# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pickle
import unittest
from pathlib import Path

import numpy as np
import torch
import torch.nn as nn

from openfold.config import model_config
from openfold.data import data_transforms
from openfold.model.model import AlphaFold
from openfold.utils.tensor_utils import tensor_tree_map

import tests.compare_utils as compare_utils
from tests.config import consts
from tests.data_utils import (
    random_template_feats,
    random_extra_msa_feats,
    random_asym_ids,
)

if compare_utils.alphafold_is_installed():
    alphafold = compare_utils.import_alphafold()
    import jax
    import haiku as hk


class TestModel(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
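        # Cache handles to the relevant AlphaFold (JAX) submodules once per
        # class; the monomer and multimer implementations live in different
        # modules, and these aliases hide that difference from the tests.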
        if compare_utils.alphafold_is_installed():
            if consts.is_multimer:
                cls.am_atom = alphafold.model.all_atom_multimer
                cls.am_fold = alphafold.model.folding_multimer
                cls.am_modules = alphafold.model.modules_multimer
                cls.am_rigid = alphafold.model.geometry
            else:
                cls.am_atom = alphafold.model.all_atom
                cls.am_fold = alphafold.model.folding
                cls.am_modules = alphafold.model.modules
                cls.am_rigid = alphafold.model.r3

    def test_dry_run(self):
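        # Smoke test: build a batch of random features, run one forward pass
        # of a slimmed-down model on the GPU, and check that nothing crashes.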
        n_seq = consts.n_seq
        n_templ = consts.n_templ
        n_res = consts.n_res
        n_extra_seq = consts.n_extra

        c = model_config(consts.model)
        c.model.evoformer_stack.no_blocks = 4  # no need to go overboard here
        # don't want to set up deepspeed for this test
        c.model.evoformer_stack.blocks_per_ckpt = None

        model = AlphaFold(c).cuda()
        model.eval()

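        # Assemble a minimal synthetic feature set: a one-hot target sequence,
        # random MSA features, and random template/extra-MSA features.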
        batch = {}
        tf = torch.randint(c.model.input_embedder.tf_dim - 1, size=(n_res,))
        batch["target_feat"] = nn.functional.one_hot(
            tf, c.model.input_embedder.tf_dim
        ).float()
        batch["aatype"] = torch.argmax(batch["target_feat"], dim=-1)
        batch["residue_index"] = torch.arange(n_res)

        batch["msa_feat"] = torch.rand((n_seq, n_res, c.model.input_embedder.msa_dim))
        t_feats = random_template_feats(n_templ, n_res)
        batch.update({k: torch.tensor(v) for k, v in t_feats.items()})
        extra_feats = random_extra_msa_feats(n_extra_seq, n_res)
        batch.update({k: torch.tensor(v) for k, v in extra_feats.items()})
        batch["msa_mask"] = torch.randint(
            low=0, high=2, size=(n_seq, n_res)
        ).float()
        batch["seq_mask"] = torch.randint(low=0, high=2, size=(n_res,)).float()
        batch.update(data_transforms.make_atom14_masks(batch))
        batch["no_recycling_iters"] = torch.tensor(2.)

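        # Multimer models additionally expect per-chain bookkeeping features
        # (asym/entity/sym IDs) and an extra-MSA deletion matrix.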
        if consts.is_multimer:
            batch["asym_id"] = torch.as_tensor(random_asym_ids(n_res))
            batch["entity_id"] = batch["asym_id"].clone()
            batch["sym_id"] = torch.ones(n_res)
            batch["extra_deletion_matrix"] = torch.randint(0, 2, size=(n_extra_seq, n_res))

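        # Give every feature a trailing recycling dimension; the model
        # consumes one slice of it per recycling iteration.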
        add_recycling_dims = lambda t: (
            t.unsqueeze(-1).expand(*t.shape, c.data.common.max_recycling_iters)
        )
        batch = tensor_tree_map(add_recycling_dims, batch)

        to_cuda_device = lambda t: t.cuda()
        batch = tensor_tree_map(to_cuda_device, batch)

        with torch.no_grad():
            out = model(batch)

    def test_dry_run_seqemb_mode(self):
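        # Same smoke test for the single-sequence (ESM-1b embedding) model:
        # the "MSA" is just the query sequence, and a precomputed per-residue
        # sequence embedding stands in for real alignments.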
        n_seq = 1
        n_templ = consts.n_templ
        n_res = consts.n_res
        msa_dim = 49

        c = model_config("seq_model_esm1b")
        c.model.evoformer_stack.no_blocks = 2
        c.model.evoformer_stack.blocks_per_ckpt = None
        model = AlphaFold(c)
        model.to(torch.device('cuda'))
        model.eval()

        batch = {}
        tf = torch.randint(c.model.preembedding_embedder.tf_dim - 1, size=(n_res,))
        batch["target_feat"] = nn.functional.one_hot(tf, c.model.preembedding_embedder.tf_dim).float()
        batch["aatype"] = torch.argmax(batch["target_feat"], dim=-1)
        batch["residue_index"] = torch.arange(n_res)
        batch["msa_feat"] = torch.rand((n_seq, n_res, msa_dim))
        batch["seq_embedding"] = torch.rand((n_res, c.model.preembedding_embedder.preembedding_dim))

        t_feats = random_template_feats(n_templ, n_res)
        batch.update({k: torch.tensor(v) for k, v in t_feats.items()})

        batch["seq_mask"] = torch.randint(low=0, high=2, size=(n_res,)).float()
        batch.update(data_transforms.make_atom14_masks(batch))
        batch["msa_mask"] = torch.randint(low=0, high=2, size=(n_seq, n_res)).float()

        batch["no_recycling_iters"] = torch.tensor(2.)
        add_recycling_dims = lambda t: (
            t.unsqueeze(-1).expand(*t.shape, c.data.common.max_recycling_iters)
        )
        batch = tensor_tree_map(add_recycling_dims, batch)

        to_cuda_device = lambda t: t.to(torch.device("cuda"))
        batch = tensor_tree_map(to_cuda_device, batch)

        with torch.no_grad():
            out = model(batch)

    @compare_utils.skip_unless_alphafold_installed()
    @unittest.skipIf(consts.is_multimer, "Additional changes required for multimer.")
    def test_compare(self):
        # TODO: Fix test data for multimer MSA features
        def run_alphafold(batch):
            config = compare_utils.get_alphafold_config()

            model = self.am_modules.AlphaFold(config.model)

            return model(
                batch=batch,
                is_training=False,
                return_representations=True,
            )

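        # Haiku modules have to be wrapped in hk.transform before they can
        # be applied with an explicit parameter dictionary.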
        f = hk.transform(run_alphafold)

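        # Load the pretrained AlphaFold parameters to serve as ground truth.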
        params = compare_utils.fetch_alphafold_module_weights("")

        fpath = Path(__file__).parent.resolve() / "test_data/sample_feats.pickle"
        with open(str(fpath), "rb") as fp:
            batch = pickle.load(fp)

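        # Ground truth: run the original JAX AlphaFold on the pickled batch.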
        out_gt = f.apply(params, jax.random.PRNGKey(42), batch)

        out_gt = out_gt["structure_module"]["final_atom_positions"]
        # atom37_to_atom14 doesn't like batches
        batch["residx_atom14_to_atom37"] = batch["residx_atom14_to_atom37"][0]
        batch["atom14_atom_exists"] = batch["atom14_atom_exists"][0]

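        # Convert the ground-truth coordinates from the 37-atom to the
        # 14-atom representation so they match OpenFold's output format.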
        out_gt = self.am_atom.atom37_to_atom14(out_gt, batch)
        out_gt = torch.as_tensor(np.array(out_gt.block_until_ready()))

        batch["no_recycling_iters"] = np.array([3., 3., 3., 3.,])
        batch = {k: torch.as_tensor(v).cuda() for k, v in batch.items()}

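        # Index and embedding lookups on the PyTorch side need integer dtypes.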
        batch["aatype"] = batch["aatype"].long()
        batch["template_aatype"] = batch["template_aatype"].long()
        batch["extra_msa"] = batch["extra_msa"].long()
        batch["residx_atom37_to_atom14"] = batch[
            "residx_atom37_to_atom14"
        ].long()
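        # OpenFold expects the singular "template_all_atom_mask" key and
        # computes the template torsion angles itself.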
        batch["template_all_atom_mask"] = batch["template_all_atom_masks"]
        batch.update(
            data_transforms.atom37_to_torsion_angles("template_")(batch)
        )

        # Move the recycling dimension to the end
        move_dim = lambda t: t.permute(*range(len(t.shape))[1:], 0)
        batch = tensor_tree_map(move_dim, batch)

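        # Reproduction: run the pretrained OpenFold model on the same batch.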
        with torch.no_grad():
            model = compare_utils.get_global_pretrained_openfold()
            out_repro = model(batch)

        out_repro = tensor_tree_map(lambda t: t.cpu(), out_repro)

        out_repro = out_repro["sm"]["positions"][-1]
        out_repro = out_repro.squeeze(0)

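        # The two implementations should agree to within 1e-3 on every
        # atom coordinate.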
        self.assertTrue(torch.max(torch.abs(out_gt - out_repro)) < 1e-3)