# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
import pickle
import torch
import torch.nn as nn
import numpy as np
import unittest
from openfold.config import model_config
from openfold.data import data_transforms
from openfold.model.model import AlphaFold
from openfold.utils.tensor_utils import tensor_tree_map
import tests.compare_utils as compare_utils
from tests.config import consts
from tests.data_utils import (
    random_template_feats,
    random_extra_msa_feats,
)

if compare_utils.alphafold_is_installed():
    alphafold = compare_utils.import_alphafold()
    import jax
    import haiku as hk


class TestModel(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
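        # Alias the reference AlphaFold (JAX) modules used by the comparison tests;
        # the multimer and monomer builds expose them under different module names.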
        if consts.is_multimer:
            cls.am_atom = alphafold.model.all_atom_multimer
            cls.am_fold = alphafold.model.folding_multimer
            cls.am_modules = alphafold.model.modules_multimer
            cls.am_rigid = alphafold.model.geometry
        else:
            cls.am_atom = alphafold.model.all_atom
            cls.am_fold = alphafold.model.folding
            cls.am_modules = alphafold.model.modules
            cls.am_rigid = alphafold.model.r3

    def test_dry_run(self):
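        # Smoke test: run a reduced-depth model forward on random features
        # and make sure nothing raises.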
        n_seq = consts.n_seq
        n_templ = consts.n_templ
        n_res = consts.n_res
        n_extra_seq = consts.n_extra

        c = model_config(consts.model)
        c.model.evoformer_stack.no_blocks = 4  # no need to go overboard here
        c.model.evoformer_stack.blocks_per_ckpt = None  # don't want to set up
        # deepspeed for this test

        model = AlphaFold(c).cuda()
        model.eval()

        batch = {}
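        # Random target, MSA, template, and extra-MSA features with the shapes
        # the input embedders expect.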
        tf = torch.randint(c.model.input_embedder.tf_dim - 1, size=(n_res,))
        batch["target_feat"] = nn.functional.one_hot(
            tf, c.model.input_embedder.tf_dim
        ).float()
        batch["aatype"] = torch.argmax(batch["target_feat"], dim=-1)
        batch["residue_index"] = torch.arange(n_res)

        batch["msa_feat"] = torch.rand((n_seq, n_res, c.model.input_embedder.msa_dim))
        t_feats = random_template_feats(n_templ, n_res)
        batch.update({k: torch.tensor(v) for k, v in t_feats.items()})
        extra_feats = random_extra_msa_feats(n_extra_seq, n_res)
        batch.update({k: torch.tensor(v) for k, v in extra_feats.items()})
        batch["msa_mask"] = torch.randint(
            low=0, high=2, size=(n_seq, n_res)
        ).float()
        batch["seq_mask"] = torch.randint(low=0, high=2, size=(n_res,)).float()
        batch.update(data_transforms.make_atom14_masks(batch))
        batch["no_recycling_iters"] = torch.tensor(2.)

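        # Multimer inputs additionally need chain, entity, and symmetry IDs
        # plus an extra-MSA deletion matrix.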
        if consts.is_multimer:
            batch["asym_id"] = torch.randint(0, 1, size=(n_res,))
            batch["entity_id"] = torch.randint(0, 1, size=(n_res,))
            batch["sym_id"] = torch.randint(0, 1, size=(n_res,))
            batch["extra_deletion_matrix"] = torch.randint(0, 2, size=(n_extra_seq, n_res))

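        # Add a trailing recycling dimension to every feature tensor.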
        add_recycling_dims = lambda t: (
            t.unsqueeze(-1).expand(*t.shape, c.data.common.max_recycling_iters)
        )
        batch = tensor_tree_map(add_recycling_dims, batch)

        to_cuda_device = lambda t: t.cuda()
        batch = tensor_tree_map(to_cuda_device, batch)

        with torch.no_grad():
            out = model(batch)

    def test_dry_run_seqemb_mode(self):
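        # Same smoke test in single-sequence (sequence embedding) mode,
        # using the "seq_model_esm1b" config preset.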
        n_seq = 1
        n_templ = consts.n_templ
        n_res = consts.n_res
        msa_dim = 49

        c = model_config("seq_model_esm1b")
        c.model.evoformer_stack.no_blocks = 2
        c.model.evoformer_stack.blocks_per_ckpt = None
        model = AlphaFold(c)
        model.to(torch.device('cuda'))
        model.eval()

        batch = {}
        tf = torch.randint(c.model.preembedding_embedder.tf_dim - 1, size=(n_res,))
        batch["target_feat"] = nn.functional.one_hot(tf, c.model.preembedding_embedder.tf_dim).float()
        batch["aatype"] = torch.argmax(batch["target_feat"], dim=-1)
        batch["residue_index"] = torch.arange(n_res)
        batch["msa_feat"] = torch.rand((n_seq, n_res, msa_dim))
        batch["seq_embedding"] = torch.rand((n_res, c.model.preembedding_embedder.preembedding_dim))

        t_feats = random_template_feats(n_templ, n_res)
        batch.update({k: torch.tensor(v) for k, v in t_feats.items()})

        batch["seq_mask"] = torch.randint(low=0, high=2, size=(n_res,)).float()
        batch.update(data_transforms.make_atom14_masks(batch))
        batch["msa_mask"] = torch.randint(low=0, high=2, size=(n_seq, n_res)).float()

        batch["no_recycling_iters"] = torch.tensor(2.)
        add_recycling_dims = lambda t: (
            t.unsqueeze(-1).expand(*t.shape, c.data.common.max_recycling_iters)
        )
        batch = tensor_tree_map(add_recycling_dims, batch)

        to_cuda_device = lambda t: t.to(torch.device("cuda"))
        batch = tensor_tree_map(to_cuda_device, batch)

        with torch.no_grad():
            out = model(batch)

    @compare_utils.skip_unless_alphafold_installed()
    @unittest.skipIf(consts.is_multimer, "Additional changes required for multimer.")
    def test_compare(self):
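        # Run DeepMind's JAX AlphaFold and the pretrained OpenFold model on the
        # same feature batch and compare the final atom positions.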
        # TODO: Fix test data for multimer MSA features
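        # Haiku modules must be constructed inside a function that is wrapped
        # with hk.transform before parameters can be applied.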
        def run_alphafold(batch):
            config = compare_utils.get_alphafold_config()

            model = self.am_modules.AlphaFold(config.model)

            return model(
                batch=batch,
                is_training=False,
                return_representations=True,
            )

        f = hk.transform(run_alphafold)

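        # Fetch the pretrained AlphaFold parameters for the transformed model.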
        params = compare_utils.fetch_alphafold_module_weights("")

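        # Load the pickled feature batch shipped with the test data.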
        fpath = Path(__file__).parent.resolve() / "test_data/sample_feats.pickle"
        with open(str(fpath), "rb") as fp:
            batch = pickle.load(fp)

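        # Ground truth: run the JAX model, keep the final atom37 positions,
        # and convert them to atom14 for comparison.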
        out_gt = f.apply(params, jax.random.PRNGKey(42), batch)

        out_gt = out_gt["structure_module"]["final_atom_positions"]
        # atom37_to_atom14 doesn't like batches
        batch["residx_atom14_to_atom37"] = batch["residx_atom14_to_atom37"][0]
        batch["atom14_atom_exists"] = batch["atom14_atom_exists"][0]

        out_gt = self.am_atom.atom37_to_atom14(out_gt, batch)
        out_gt = torch.as_tensor(np.array(out_gt.block_until_ready()))

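        # Reuse the same batch for OpenFold: torch tensors on GPU, with index-like
        # features cast to long and template torsion angles recomputed.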
        batch["no_recycling_iters"] = np.array([3., 3., 3., 3.,])
        batch = {k: torch.as_tensor(v).cuda() for k, v in batch.items()}

        batch["aatype"] = batch["aatype"].long()
        batch["template_aatype"] = batch["template_aatype"].long()
        batch["extra_msa"] = batch["extra_msa"].long()
        batch["residx_atom37_to_atom14"] = batch[
            "residx_atom37_to_atom14"
        ].long()
        batch["template_all_atom_mask"] = batch["template_all_atom_masks"]
        batch.update(
            data_transforms.atom37_to_torsion_angles("template_")(batch)
        )

        # Move the recycling dimension to the end
        move_dim = lambda t: t.permute(*range(len(t.shape))[1:], 0)
        batch = tensor_tree_map(move_dim, batch)

        with torch.no_grad():
            model = compare_utils.get_global_pretrained_openfold()
            out_repro = model(batch)

        out_repro = tensor_tree_map(lambda t: t.cpu(), out_repro)

        out_repro = out_repro["sm"]["positions"][-1]
        out_repro = out_repro.squeeze(0)

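        # The two implementations should agree on atom positions to within 1e-3.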
        self.assertTrue(torch.max(torch.abs(out_gt - out_repro)) < 1e-3)