Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
OpenFold
Commits
a7c0d0d1
Commit
a7c0d0d1
authored
Sep 18, 2023
by
Sachin Kadyan
Browse files
Added sequence embedding mode test for `model`.
parent
777d738a
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
40 additions
and
0 deletions
+40
-0
tests/test_model.py
tests/test_model.py
+40
-0
No files found.
tests/test_model.py
View file @
a7c0d0d1
...
...
@@ -77,6 +77,46 @@ class TestModel(unittest.TestCase):
with
torch
.
no_grad
():
out
=
model
(
batch
)
    def test_dry_run_seqemb_mode(self):
        """Smoke-test a forward pass of AlphaFold in sequence-embedding mode.

        Builds a minimal random feature batch for the "seq_model_esm1b"
        config (single-sequence MSA plus a precomputed per-residue sequence
        embedding), runs the model once under no_grad on CUDA, and only
        checks that the forward pass completes without error.
        """
        n_seq = 1  # seq-embedding mode: a single sequence stands in for the MSA
        n_templ = consts.n_templ
        n_res = consts.n_res
        msa_dim = 49  # NOTE(review): width of msa_feat expected by this config — confirm against featurization

        c = model_config("seq_model_esm1b")
        # Shrink the Evoformer and disable checkpointing so the dry run is fast.
        c.model.evoformer_stack.no_blocks = 2
        c.model.evoformer_stack.blocks_per_ckpt = None
        model = AlphaFold(c)
        model.to(torch.device('cuda'))
        model.eval()

        batch = {}
        # Random residue types, one-hot encoded to the embedder's target-feature width.
        tf = torch.randint(c.model.preembedding_embedder.tf_dim - 1, size=(n_res,))
        batch["target_feat"] = nn.functional.one_hot(tf, c.model.preembedding_embedder.tf_dim).float()
        batch["aatype"] = torch.argmax(batch["target_feat"], dim=-1)
        batch["residue_index"] = torch.arange(n_res)
        batch["msa_feat"] = torch.rand((n_seq, n_res, msa_dim))
        # Precomputed (e.g. ESM-1b) per-residue embedding consumed by the preembedding embedder.
        batch["seq_embedding"] = torch.rand((n_res, c.model.preembedding_embedder.preembedding_dim))
        t_feats = random_template_feats(n_templ, n_res)
        batch.update({k: torch.tensor(v) for k, v in t_feats.items()})
        batch["seq_mask"] = torch.randint(low=0, high=2, size=(n_res,)).float()
        # Derive atom14/atom37 masks from aatype; must run after aatype is set.
        batch.update(data_transforms.make_atom14_masks(batch))
        batch["msa_mask"] = torch.randint(low=0, high=2, size=(n_seq, n_res)).float()
        batch["no_recycling_iters"] = torch.tensor(2.)

        # Tile every feature along a trailing recycling dimension, as the model expects.
        add_recycling_dims = lambda t: (
            t.unsqueeze(-1).expand(*t.shape, c.data.common.max_recycling_iters)
        )
        batch = tensor_tree_map(add_recycling_dims, batch)
        to_cuda_device = lambda t: t.to(torch.device("cuda"))
        batch = tensor_tree_map(to_cuda_device, batch)

        # Dry run: success == no exception raised.
        with torch.no_grad():
            out = model(batch)
@
compare_utils
.
skip_unless_alphafold_installed
()
def
test_compare
(
self
):
def
run_alphafold
(
batch
):
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment