# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Unit tests comparing OpenFold components run with the DeepSpeed memory-efficient
attention kernel, DS4Sci_EvoformerAttention, against a stock PyTorch attention implementation.
"""

import torch
import unittest
import numpy as np
import pickle

from openfold.model.primitives import (
    Attention,
)
from tests.config import consts
import tests.compare_utils as compare_utils
from tests.data_utils import (
    random_template_feats,
    random_extra_msa_feats,
)

from openfold.config import model_config
from openfold.data import data_transforms
from openfold.model.model import AlphaFold
from openfold.utils.tensor_utils import tensor_tree_map


class TestDeepSpeedKernel(unittest.TestCase):
    def test_ds_kernel_vs_attention(self):
        """Compare attention with and without using DeepSpeed Evoformer kernel."""
        batch_size = consts.batch_size
        c_hidden = 32
        n = 2 ** 12
        n_seq = 12
        no_heads = 4
        eps = 2e-2

        q = torch.rand(batch_size, n_seq, n, c_hidden).cuda()
        kv = torch.rand(batch_size, n_seq, n, c_hidden).cuda()

        # Bias terms shaped like the MSA mask bias and the pair bias used in MSA row attention
        bias = [torch.rand(batch_size, n_seq, 1, 1, n),
                torch.rand(batch_size, 1, no_heads, n, n)]
        bias = [b.cuda() for b in bias]

        a = Attention(
            c_hidden, c_hidden, c_hidden, c_hidden, no_heads
        ).cuda()

        with torch.no_grad():
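            # Forward the same inputs through the DeepSpeed kernel path and the stock
            # PyTorch attention path; the two outputs should agree to within eps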
            out_ds = a(q, kv, biases=bias, use_deepspeed_evo_attention=True)
            real = a(q, kv, biases=bias)

        self.assertTrue(torch.max(torch.abs(out_ds - real)) < eps)

    def compare_evoformer(self, dtype):
        """
        Compare Evoformer output with and without using DeepSpeed Evoformer attention kernel.
        Set dtype to confirm the kernel can be used during both training (BF16) and inference (FP32),
        since the kernel itself can run with either BF16 or FP16 precision.
        """
        n_res = consts.n_res
        n_seq = consts.n_seq
        eps = 2e-2

        activations = {
            "msa": torch.rand(n_seq, n_res, consts.c_m, device='cuda', dtype=dtype),
            "pair": torch.rand(n_res, n_res, consts.c_z, device='cuda', dtype=dtype)
        }

        masks = {
            "msa": torch.randint(0, 2, (n_seq, n_res), device='cuda', dtype=dtype),
            "pair": torch.randint(0, 2, (n_res, n_res), device='cuda', dtype=dtype),
        }

        with torch.cuda.amp.autocast(dtype=dtype):
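            # Run the first Evoformer block twice on identical inputs: once with the stock
            # attention path and once with the DeepSpeed kernel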
            model = compare_utils.get_global_pretrained_openfold()
            out_repro_msa, out_repro_pair = model.evoformer.blocks[0](
                activations["msa"],
                activations["pair"],
                masks["msa"],
                masks["pair"],
                use_deepspeed_evo_attention=False,
                chunk_size=4,
                _mask_trans=False,
                inplace_safe=False,
            )

            out_repro_msa = out_repro_msa.cpu()
            out_repro_pair = out_repro_pair.cpu()

            out_repro_msa_ds, out_repro_pair_ds = model.evoformer.blocks[0](
                activations["msa"],
                activations["pair"],
                masks["msa"],
                masks["pair"],
                use_deepspeed_evo_attention=True,
                chunk_size=4,
                _mask_trans=False,
                inplace_safe=False,
            )
            out_repro_msa_ds = out_repro_msa_ds.cpu()
            out_repro_pair_ds = out_repro_pair_ds.cpu()

            self.assertTrue(torch.allclose(torch.abs(out_repro_msa), torch.abs(out_repro_msa_ds), atol=eps))
            self.assertTrue(torch.allclose(torch.abs(out_repro_pair), torch.abs(out_repro_pair_ds), atol=eps))

    def test_compare_evoformer_bf16(self):
        """Run evoformer comparison test with BF16 precision."""
        self.compare_evoformer(torch.bfloat16)

    def test_compare_evoformer_fp32(self):
        """Run evoformer comparison test with FP32 precision."""
        self.compare_evoformer(torch.float32)

    def test_compare_model(self):
        """
        Run full model with and without using DeepSpeed Evoformer attention kernel
        and compare output coordinates
        """
        eps = 2e-2
        with open("tests/test_data/sample_feats.pickle", "rb") as fp:
            batch = pickle.load(fp)

        # atom37_to_atom14 doesn't like batches
        batch["residx_atom14_to_atom37"] = batch["residx_atom14_to_atom37"][0]
        batch["atom14_atom_exists"] = batch["atom14_atom_exists"][0]

        batch["no_recycling_iters"] = np.array([3., 3., 3., 3., ])
        batch = {k: torch.as_tensor(v).cuda() for k, v in batch.items()}

        batch["aatype"] = batch["aatype"].long()
        batch["template_aatype"] = batch["template_aatype"].long()
        batch["extra_msa"] = batch["extra_msa"].long()
        batch["residx_atom37_to_atom14"] = batch[
            "residx_atom37_to_atom14"
        ].long()
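        # The sample batch stores the template mask under the legacy plural key; copy it to
        # the singular name expected by the torsion-angle transform below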
        batch["template_all_atom_mask"] = batch["template_all_atom_masks"]
        batch.update(
            data_transforms.atom37_to_torsion_angles("template_")(batch)
        )

        # Move the recycling dimension to the end
        move_dim = lambda t: t.permute(*range(len(t.shape))[1:], 0)
        batch = tensor_tree_map(move_dim, batch)

        with torch.no_grad():
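            # Baseline forward pass with the stock attention implementation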
            model = compare_utils.get_global_pretrained_openfold()
            out_repro = model(batch)

            # Enable kernel
            model.globals.use_deepspeed_evo_attention = True
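            # Re-run the same batch with the kernel enabled; the predicted coordinates
            # should match the baseline to within eps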
            out_repro_ds = model(batch)

            out_repro = tensor_tree_map(lambda t: t.cpu(), out_repro)
            out_repro_ds = tensor_tree_map(lambda t: t.cpu(), out_repro_ds)

            out_repro = out_repro["sm"]["positions"][-1].squeeze(0)
            out_repro_ds = out_repro_ds["sm"]["positions"][-1].squeeze(0)

            self.assertTrue(torch.max(torch.abs(out_repro - out_repro_ds)) < eps)


if __name__ == "__main__":
    unittest.main()