# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Unit tests comparing components of OpenFold run with the DeepSpeed memory-efficient
attention kernel, DS4Sci_EvoformerAttention, against a stock PyTorch attention implementation.

Note: Some tests are temporarily disabled while we investigate discrepancies related
to using fused attention.
"""

import torch
import unittest
import numpy as np
import pickle

from openfold.model.primitives import (
    _attention,
    _deepspeed_evo_attn
)
from tests.config import consts
import tests.compare_utils as compare_utils

from openfold.data import data_transforms
from openfold.utils.tensor_utils import tensor_tree_map


class TestDeepSpeedKernel(unittest.TestCase):
    def test_ds_kernel_vs_attention(self):
        """Compare attention with and without using DeepSpeed Evoformer kernel."""
        batch_size = consts.batch_size
        c_hidden = 32
        n = 2 ** 12
        n_seq = 12
        no_heads = 4
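        # The kernel runs in reduced precision (BF16 or FP16), so build the inputs in BF16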
        dtype = torch.bfloat16

        q = torch.rand(batch_size, n_seq, n, no_heads, c_hidden, dtype=dtype).cuda()
        k = torch.rand(batch_size, n_seq, n, no_heads, c_hidden, dtype=dtype).cuda()
        v = torch.rand(batch_size, n_seq, n, no_heads, c_hidden, dtype=dtype).cuda()

        # Mask-style bias ([*, N_seq, 1, 1, N_res]) and pair bias ([*, 1, H, N_res, N_res])
        bias = [torch.rand(batch_size, n_seq, 1, 1, n), torch.rand(batch_size, 1, no_heads, n, n)]
        bias = [b.to(dtype=dtype).cuda() for b in bias]

        with torch.no_grad():
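            # The DeepSpeed kernel consumes inputs shaped [*, N_seq, N_res, H, C_hidden]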
            out_ds = _deepspeed_evo_attn(q, k, v, biases=bias).cpu()

            # The stock attention implementation expects the head dimension
            # ahead of the residue dimension, so transpose before and after
            q = q.transpose(-2, -3)
            k = k.transpose(-2, -3)
            v = v.transpose(-2, -3)
            real = _attention(q, k, v, biases=bias)
            real = real.transpose(-2, -3).cpu()

        err = torch.max(torch.abs(out_ds - real))
        self.assertTrue(err < consts.eps, f'Error: {err}')

    def compare_evoformer(self, dtype):
        """
        Compare Evoformer output with and without the DeepSpeed Evoformer attention kernel.
        The dtype argument lets us confirm the kernel can be used in both training (BF16) and
        inference (FP32) setups, since the kernel itself runs in either BF16 or FP16 precision.
        """
        n_res = 20
        n_seq = 18
        eps = 2e-2

        activations = {
            "msa": torch.rand(n_seq, n_res, consts.c_m, device='cuda', dtype=dtype),
            "pair": torch.rand(n_res, n_res, consts.c_z, device='cuda', dtype=dtype)
        }

        masks = {
            "msa": torch.randint(0, 2, (n_seq, n_res), device='cuda', dtype=dtype),
            "pair": torch.randint(0, 2, (n_res, n_res), device='cuda', dtype=dtype),
        }

        with torch.cuda.amp.autocast(dtype=dtype):
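            # Run one Evoformer block with the stock attention path first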
            model = compare_utils.get_global_pretrained_openfold()
            out_repro_msa, out_repro_pair = model.evoformer.blocks[0](
                activations["msa"],
                activations["pair"],
                masks["msa"],
                masks["pair"],
                use_deepspeed_evo_attention=False,
                chunk_size=4,
                _mask_trans=False,
                inplace_safe=False,
            )

            out_repro_msa = out_repro_msa.cpu()
            out_repro_pair = out_repro_pair.cpu()

            # Repeat the same block with the DeepSpeed kernel enabled
            out_repro_msa_ds, out_repro_pair_ds = model.evoformer.blocks[0](
                activations["msa"],
                activations["pair"],
                masks["msa"],
                masks["pair"],
                use_deepspeed_evo_attention=True,
                chunk_size=4,
                _mask_trans=False,
                inplace_safe=False,
            )
            out_repro_msa_ds = out_repro_msa_ds.cpu()
            out_repro_pair_ds = out_repro_pair_ds.cpu()

            self.assertTrue(torch.allclose(torch.abs(out_repro_msa), torch.abs(out_repro_msa_ds), atol=eps))
            self.assertTrue(torch.allclose(torch.abs(out_repro_pair), torch.abs(out_repro_pair_ds), atol=eps))

    def test_compare_evoformer_bf16(self):
        """Run evoformer comparison test with BF16 precision."""
        self.compare_evoformer(torch.bfloat16)

    def test_compare_evoformer_fp32(self):
        """Run evoformer comparison test with FP32 precision."""
        self.compare_evoformer(torch.float32)

    def test_compare_model(self):
        """
        Run the full model with and without the DeepSpeed Evoformer attention kernel
        and compare the output coordinates.
        """
        eps = 2e-2
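        # Load a pickled batch of sample input features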
        with open("tests/test_data/sample_feats.pickle", "rb") as fp:
            batch = pickle.load(fp)

        # atom37_to_atom14 doesn't like batches
        batch["residx_atom14_to_atom37"] = batch["residx_atom14_to_atom37"][0]
        batch["atom14_atom_exists"] = batch["atom14_atom_exists"][0]

        # Fix the number of recycling iterations for each example in the batch
        batch["no_recycling_iters"] = np.array([3., 3., 3., 3.])
        batch = {k: torch.as_tensor(v).cuda() for k, v in batch.items()}

        # Index-type features must be int64
        batch["aatype"] = batch["aatype"].long()
        batch["template_aatype"] = batch["template_aatype"].long()
        batch["extra_msa"] = batch["extra_msa"].long()
        batch["residx_atom37_to_atom14"] = batch[
            "residx_atom37_to_atom14"
        ].long()
        batch["template_all_atom_mask"] = batch["template_all_atom_masks"]
        batch.update(
            data_transforms.atom37_to_torsion_angles("template_")(batch)
        )

        # Move the recycling dimension to the end
        move_dim = lambda t: t.permute(*range(len(t.shape))[1:], 0)
        batch = tensor_tree_map(move_dim, batch)

        # Run the model twice in inference mode: first with stock attention, then with the kernel enabled
        with torch.no_grad():
            with torch.cuda.amp.autocast(dtype=torch.bfloat16):
                model = compare_utils.get_global_pretrained_openfold()
                out_repro = model(batch)

                # Enable kernel
                model.globals.use_deepspeed_evo_attention = True
                out_repro_ds = model(batch)

                out_repro = tensor_tree_map(lambda t: t.cpu(), out_repro)
                out_repro_ds = tensor_tree_map(lambda t: t.cpu(), out_repro_ds)

                # Compare the final structure module coordinates
                out_repro = out_repro["sm"]["positions"][-1].squeeze(0)
                out_repro_ds = out_repro_ds["sm"]["positions"][-1].squeeze(0)

                err = torch.max(torch.abs(out_repro - out_repro_ds))
                self.assertTrue(err < eps, f'Error: {err}')


if __name__ == "__main__":
    unittest.main()