"git@developer.sourcefind.cn:OpenDAS/openfold.git" did not exist on "78fa6c6ea9fd3724125fcf32ace3e9e8a0304f1d"
Commit c8c12397 authored by Sachin Kadyan's avatar Sachin Kadyan
Browse files

Separate out the seq mode configs from vanilla OF config

parent b45a91ba
...@@ -186,6 +186,7 @@ def model_config( ...@@ -186,6 +186,7 @@ def model_config(
# In seqemb mode, we turn off the ExtraMSAStack and Evoformer's column attention. # In seqemb mode, we turn off the ExtraMSAStack and Evoformer's column attention.
c.model.extra_msa.enabled = False c.model.extra_msa.enabled = False
c.model.evoformer_stack.no_column_attention = True c.model.evoformer_stack.no_column_attention = True
c.update(seq_mode_config.copy_and_resolve_references())
if long_sequence_inference: if long_sequence_inference:
assert(not train) assert(not train)
...@@ -284,7 +285,6 @@ config = mlc.ConfigDict( ...@@ -284,7 +285,6 @@ config = mlc.ConfigDict(
"rigidgroups_gt_frames": [NUM_RES, None, None, None], "rigidgroups_gt_frames": [NUM_RES, None, None, None],
"seq_length": [], "seq_length": [],
"seq_mask": [NUM_RES], "seq_mask": [NUM_RES],
"seq_embedding": [NUM_RES, None],
"target_feat": [NUM_RES, None], "target_feat": [NUM_RES, None],
"template_aatype": [NUM_TEMPLATES, NUM_RES], "template_aatype": [NUM_TEMPLATES, NUM_RES],
"template_all_atom_mask": [NUM_TEMPLATES, NUM_RES, None], "template_all_atom_mask": [NUM_TEMPLATES, NUM_RES, None],
...@@ -336,19 +336,11 @@ config = mlc.ConfigDict( ...@@ -336,19 +336,11 @@ config = mlc.ConfigDict(
"deletion_matrix", "deletion_matrix",
"no_recycling_iters", "no_recycling_iters",
], ],
"seqemb_features": [ # List of features to be generated in seqemb mode
"seq_embedding"
],
"use_templates": templates_enabled, "use_templates": templates_enabled,
"use_template_torsion_angles": embed_template_torsion_angles, "use_template_torsion_angles": embed_template_torsion_angles,
}, },
"seqemb_mode": { # Configuration for sequence embedding mode "seqemb_mode": { # Configuration for sequence embedding mode
"enabled": False, # If True, use seq emb instead of MSA "enabled": False, # If True, use seq emb instead of MSA
"seqemb_config": {
"max_msa_clusters": 1,
"max_extra_msa": 0,
"max_distillation_msa_clusters": 1
},
}, },
"supervised": { "supervised": {
"clamp_prob": 0.9, "clamp_prob": 0.9,
...@@ -440,13 +432,6 @@ config = mlc.ConfigDict( ...@@ -440,13 +432,6 @@ config = mlc.ConfigDict(
"c_m": c_m, "c_m": c_m,
"relpos_k": 32, "relpos_k": 32,
}, },
"preembedding_embedder": { # Used in sequence embedding mode
"tf_dim": 22,
"preembedding_dim": preemb_dim_size,
"c_z": c_z,
"c_m": c_m,
"relpos_k": 32,
},
"recycling_embedder": { "recycling_embedder": {
"c_z": c_z, "c_z": c_z,
"c_m": c_m, "c_m": c_m,
...@@ -672,3 +657,31 @@ config = mlc.ConfigDict( ...@@ -672,3 +657,31 @@ config = mlc.ConfigDict(
"ema": {"decay": 0.999}, "ema": {"decay": 0.999},
} }
) )
# Overrides layered onto the vanilla OpenFold config (applied via
# `c.update(seq_mode_config.copy_and_resolve_references())`) when running
# in sequence-embedding (seqemb) mode, where a per-sequence embedding is
# used instead of MSA-derived features.
seq_mode_config = mlc.ConfigDict({
    "data": {
        "common": {
            "feat": {
                # Per-residue embedding feature; the trailing None leaves the
                # embedding channel dimension unspecified here.
                "seq_embedding": [NUM_RES, None],
            },
            # Features generated only in seqemb mode.
            "seqemb_features": ["seq_embedding"],
        },
        "seqemb_mode": {
            # If True, use the sequence embedding instead of an MSA.
            "enabled": True,
        },
    },
    "globals": {
        "seqemb_mode_enabled": True,
    },
    "model": {
        # Embedder used in place of the MSA-based input embedder in seqemb mode.
        "preembedding_embedder": {
            "tf_dim": 22,
            "preembedding_dim": preemb_dim_size,
            "c_z": c_z,
            "c_m": c_m,
            "relpos_k": 32,
        },
    },
})
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment