Commit 047e69af authored by Sachin Kadyan's avatar Sachin Kadyan
Browse files

Fix for a bug in data_transforms which wouldn't allow creation of MSA mask if...

Fix for a bug in data_transforms which wouldn't allow creation of the MSA mask if only the input sequence is present in the MSA.
- Set `max_msa_clusters=1` in the model presets so the input sequence can serve as an MSA cluster centre.
parent 624b5aa6
...@@ -160,11 +160,15 @@ def model_config( ...@@ -160,11 +160,15 @@ def model_config(
# In seqemb mode, we turn off the ExtraMSAStack and Evoformer's column attention. # In seqemb mode, we turn off the ExtraMSAStack and Evoformer's column attention.
c.model.extra_msa.enabled = False c.model.extra_msa.enabled = False
c.model.evoformer_stack.no_column_attention = True c.model.evoformer_stack.no_column_attention = True
c.data.train.max_msa_clusters = 1
c.data.eval.max_msa_clusters = 1
elif name == "seqemb_finetuning": elif name == "seqemb_finetuning":
c.data.seqemb_mode.enabled = True c.data.seqemb_mode.enabled = True
c.globals.seqemb_mode_enabled = True c.globals.seqemb_mode_enabled = True
c.model.extra_msa.enabled = False c.model.extra_msa.enabled = False
c.model.evoformer_stack.no_column_attention = True c.model.evoformer_stack.no_column_attention = True
c.data.train.max_msa_clusters = 1
c.data.eval.max_msa_clusters = 1
c.data.train.crop_size = 384 c.data.train.crop_size = 384
c.loss.violation.weight = 1. c.loss.violation.weight = 1.
c.loss.experimentally_resolved.weight = 0.01 c.loss.experimentally_resolved.weight = 0.01
...@@ -176,6 +180,7 @@ def model_config( ...@@ -176,6 +180,7 @@ def model_config(
c.data.common.use_templates = True c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True c.model.template.enabled = True
c.data.predict.max_msa_clusters = 1
else: else:
raise ValueError("Invalid model name") raise ValueError("Invalid model name")
...@@ -337,9 +342,9 @@ config = mlc.ConfigDict( ...@@ -337,9 +342,9 @@ config = mlc.ConfigDict(
"seqemb_mode": { # Configuration for sequence embedding mode "seqemb_mode": { # Configuration for sequence embedding mode
"enabled": False, # If True, use seq emb instead of MSA "enabled": False, # If True, use seq emb instead of MSA
"seqemb_config": { "seqemb_config": {
"max_msa_clusters": 0, "max_msa_clusters": 1,
"max_extra_msa": 0, "max_extra_msa": 0,
"max_distillation_msa_clusters": 0 "max_distillation_msa_clusters": 1
}, },
}, },
"supervised": { "supervised": {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment