Commit d7f3d622 authored by Geoffrey Yu

fixed TypeError: enabled must be a bool (got Tensor) when training multimer

parent e963726b
@@ -155,12 +155,12 @@ def model_config(
         c.loss.tm.weight = 0.1
     elif "multimer" in name:
         c.globals.is_multimer = True
-        c.globals.bfloat16 = False
+        c.globals.bfloat16 = True
         c.globals.bfloat16_output = False
         c.loss.masked_msa.num_classes = 22
         c.data.common.max_recycling_iters = 20
-        for k,v in multimer_model_config_update['model'].items():
+        for k,v in multimer_model_config_update.items():
             c.model[k] = v
         for k,v in multimer_model_config_update['loss'].items():
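The 'model' wrapper key is removed from multimer_model_config_update (see the dict change further down), so its top-level entries now merge straight into c.model. A minimal sketch of the merge behavior, on a toy ConfigDict rather than the real OpenFold config:

    import ml_collections as mlc

    # Toy stand-in for the real config (assumed values, illustration only)
    c = mlc.ConfigDict({
        "model": {"input_embedder": {"tf_dim": 22, "msa_dim": 49}},
        "loss": {"masked_msa": {"num_classes": 23}},
    })
    update = {
        "input_embedder": {"tf_dim": 21, "msa_dim": 49},
        "loss": {"masked_msa": {"num_classes": 22}},
    }
    for k, v in update.items():
        c.model[k] = v  # note: the "loss" entry is copied under c.model as well
    for k, v in update["loss"].items():
        c.loss[k] = v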
@@ -593,12 +593,6 @@ config = mlc.ConfigDict(
                     "c_out": 37,
                 },
             },
-            # A negative value indicates that no early stopping will occur, i.e.
-            # the model will always run `max_recycling_iters` number of recycling
-            # iterations. A positive value will enable early stopping if the
-            # difference in pairwise distances is less than the tolerance between
-            # recycling steps.
-            "recycle_early_stop_tolerance": -1.
         },
         "relax": {
             "max_iterations": 0,  # no max
@@ -679,11 +673,17 @@ config = mlc.ConfigDict(
             "eps": eps,
         },
         "ema": {"decay": 0.999},
+        # A negative value indicates that no early stopping will occur, i.e.
+        # the model will always run `max_recycling_iters` number of recycling
+        # iterations. A positive value will enable early stopping if the
+        # difference in pairwise distances is less than the tolerance between
+        # recycling steps.
+        "recycle_early_stop_tolerance": -1
     }
 )

 multimer_model_config_update = {
-    'model':{"input_embedder": {
+    "input_embedder": {
         "tf_dim": 21,
         "msa_dim": 49,
         #"num_msa": 508,
@@ -694,20 +694,6 @@ multimer_model_config_update = {
         "max_relative_idx": 32,
         "use_chain_relative": True,
     },
-    "template": {
-        "distogram": {
-            "min_bin": 3.25,
-            "max_bin": 50.75,
-            "no_bins": 39,
-        },
-        "template_pair_embedder": {
-            "c_z": c_z,
-            "c_m": c_m,
-            "relpos_k": 32,
-            "max_relative_chain": 2,
-            "max_relative_idx": 32,
-            "use_chain_relative": True,
-        },
     "template": {
         "distogram": {
             "min_bin": 3.25,
@@ -839,10 +825,6 @@ multimer_model_config_update = {
             "c_out": 37,
         },
     },
-    "recycle_early_stop_tolerance": 0.5
-    },
-    "recycle_early_stop_tolerance": 0.5
-    },
     "loss": {
         "distogram": {
             "min_bin": 2.3125,
@@ -919,4 +901,5 @@ multimer_model_config_update = {
         },
         "eps": eps,
     },
+    "recycle_early_stop_tolerance": 0.5
 }
@@ -190,7 +190,7 @@ class AlphaFold(nn.Module):
         sq_diff = (distances(prev_pos[..., ca_idx, :]) - distances(next_pos[..., ca_idx, :])) ** 2
         mask = mask[..., None] * mask[..., None, :]
         sq_diff = masked_mean(mask=mask, value=sq_diff, dim=list(range(len(mask.shape))))
-        diff = torch.sqrt(sq_diff + eps)
+        diff = torch.sqrt(sq_diff + eps).item()
         return diff <= self.config.recycle_early_stop_tolerance

     def iteration(self, feats, prevs, _recycle=True):
...
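On the commit message itself: comparing a 0-dim Tensor against a float returns a 0-dim bool Tensor, not a Python bool, and in the recycling loop that flag is combined with `and` and handed to torch.set_grad_enabled, which rejects Tensors. A minimal reproduction of the error and the fix (the gating expression is an assumption about the surrounding loop):

    import torch

    diff = torch.sqrt(torch.tensor(0.04) + 1e-8)  # 0-dim Tensor
    early_stop = diff <= 0.5                      # 0-dim bool Tensor, not a bool
    try:
        # `True and early_stop` evaluates to the Tensor itself, which then
        # trips set_grad_enabled's type check
        with torch.set_grad_enabled(True and early_stop):
            pass
    except TypeError as e:
        print(e)  # enabled must be a bool (got Tensor)

    early_stop = diff.item() <= 0.5  # with .item(): a plain Python bool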
# Copyright 2021 AlQuraishi Laboratory
# Dingquan Yu @ EMBL-Hamburg Kosinski group
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pickle
import unittest

import numpy as np
import torch
import torch.nn as nn

from openfold.config import model_config
from openfold.data import data_transforms
from openfold.model.model import AlphaFold
from openfold.utils.loss import AlphaFoldMultimerLoss
from openfold.utils.rigid_utils import Rigid
from openfold.utils.tensor_utils import tensor_tree_map
from tests.config import consts
from tests.data_utils import (
    random_template_feats,
    random_extra_msa_feats,
    random_affines_vector,
)

logger = logging.getLogger(__name__)
class TestPermutation(unittest.TestCase):
    def setUp(self):
        """
        First set up the model config and the model as in test_model.py.
        This test case uses PDB ID 1e4k as the label.
        """
        self.test_data_dir = os.path.join(os.getcwd(), "tests/test_data")
        self.label_ids = ['label_1', 'label_1', 'label_2', 'label_2', 'label_2']
        # 5 chains: two of length 9 and three of length 13 (57 residues total)
        self.asym_id = [0] * 9 + [1] * 9 + [2] * 13 + [3] * 13 + [4] * 13

    def affine_vector_to_4x4(self, affine):
        # Convert a 7-component affine (quaternion + translation) into a
        # 4x4 homogeneous transform via OpenFold's Rigid helper
        r = Rigid.from_tensor_7(affine)
        return r.to_tensor_4x4()
    def test_dry_run(self):
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
        n_seq = consts.n_seq
        n_templ = consts.n_templ
        n_res = len(self.asym_id)
        n_extra_seq = consts.n_extra

        c = model_config(consts.model, train=True)
        c.loss.masked_msa.num_classes = 22  # need to overwrite this in the multimer loss config
        c.model.evoformer_stack.no_blocks = 4  # no need to go overboard here
        c.model.evoformer_stack.blocks_per_ckpt = None  # don't want to set up
        # deepspeed for this test

        model = AlphaFold(c)
        multimer_loss = AlphaFoldMultimerLoss(c.loss)
        example_label = [
            pickle.load(open(os.path.join(self.test_data_dir, f"{i}.pkl"), 'rb'))
            for i in self.label_ids
        ]

        batch = {}
        tf = torch.randint(c.model.input_embedder.tf_dim - 1, size=(n_res,))
        batch["target_feat"] = nn.functional.one_hot(
            tf, c.model.input_embedder.tf_dim
        ).float()
batch["aatype"] = torch.argmax(batch["target_feat"], dim=-1)
batch["residue_index"] = torch.arange(n_res)
backbone_dict ={
"backbone_affine_tensor": torch.tensor(random_affines_vector((n_res,))),
"backbone_affine_mask": torch.from_numpy(np.random.randint(0, 2, (n_res,)).astype(
np.float32
)),
"use_clamped_fape": torch.from_numpy(np.array(0.0)),
}
batch['backbone_rigid_tensor'] = self.affine_vector_to_4x4(backbone_dict['backbone_affine_tensor'])
batch['backbone_rigid_mask'] = backbone_dict['backbone_affine_mask']
true_msa_dict ={
"true_msa": torch.tensor(np.random.randint(0, 21, (n_seq,n_res))),
"bert_mask": torch.tensor(np.random.randint(0, 2, (n_seq,n_res)).astype(
np.float32)
)
}
batch.update(true_msa_dict)
batch["msa_feat"] = torch.rand((n_seq, n_res, c.model.input_embedder.msa_dim))
t_feats = random_template_feats(n_templ, n_res)
batch.update({k: torch.tensor(v) for k, v in t_feats.items()})
extra_feats = random_extra_msa_feats(n_extra_seq, n_res)
batch.update({k: torch.tensor(v) for k, v in extra_feats.items()})
batch["msa_mask"] = torch.randint(
low=0, high=2, size=(n_seq, n_res)
).float()
batch["seq_mask"] = torch.randint(low=0, high=2, size=(n_res,)).float()
batch.update(data_transforms.make_atom14_masks(batch))
batch["no_recycling_iters"] = torch.tensor(2.)
batch["seq_length"] = torch.from_numpy(np.array([n_res] * n_res))
        if consts.is_multimer:
            # Set asym_id, entity_id and sym_id so that the batch encodes
            # 5 chains belonging to 2 entities
            asym_id = self.asym_id
            batch["asym_id"] = torch.tensor(asym_id, dtype=torch.float64)
            # batch["entity_id"] = torch.randint(0, 1, size=(n_res,))
            batch['entity_id'] = torch.tensor([0] * 18 + [1] * 39, dtype=torch.float64)
            batch["sym_id"] = torch.tensor(asym_id, dtype=torch.float64)
            # batch["num_sym"] = torch.tensor([1]*18+[2]*13,dtype=torch.int64)
            batch["extra_deletion_matrix"] = torch.randint(0, 2, size=(n_extra_seq, n_res))

        # Give every tensor a trailing recycling dimension and then a leading
        # batch dimension
        add_recycling_dims = lambda t: (
            t.unsqueeze(-1).expand(*t.shape, c.data.common.max_recycling_iters)
        )
        add_batch_size_dimension = lambda t: (
            t.unsqueeze(0)
        )
        batch = tensor_tree_map(add_recycling_dims, batch)
        batch = tensor_tree_map(add_batch_size_dimension, batch)

        with torch.no_grad():
            out = model(batch)
            logger.info("finished forward pass on batch with a batch_size dimension")
            multimer_loss(out, (batch, example_label))