Commit efe6fe3a authored by oahzxl's avatar oahzxl
Browse files

code style

parent 7a23deb5
......@@ -33,23 +33,6 @@ def _benchmark_evoformer(model: torch.nn.Module, node, pair, title):
)
def benchmark_evoformer():
    """Compare runtime of the vanilla Evoformer against its autochunk variant.

    Builds random MSA/pair feature tensors on the GPU, wraps the base model
    with autochunk under a fixed memory budget, then runs both versions
    through the shared benchmark harness.
    """
    # init data and model: random inputs with OpenFold's feature dims
    msa_len, pair_len = 300, 800
    msa_feat = torch.randn(1, msa_len, pair_len, 256).cuda()
    pair_feat = torch.randn(1, pair_len, pair_len, 128).cuda()
    base_model = evoformer_base().cuda()

    # build the chunked version under a 3000 MB activation-memory budget
    budget_mb = 3000  # MB
    chunked_model = _build_autochunk(base_model, budget_mb, msa_feat, pair_feat)

    # benchmark both variants on identical inputs
    _benchmark_evoformer(base_model, msa_feat, pair_feat, "openfold")
    _benchmark_evoformer(chunked_model, msa_feat, pair_feat, "autochunk")
def _build_autochunk(model, max_memory, node, pair):
# trace the module and replace codegen
graph = ColoTracer().trace(
......@@ -81,5 +64,22 @@ def _build_autochunk(model, max_memory, node, pair):
return gm
def benchmark_evoformer():
    """Benchmark the OpenFold Evoformer with and without autochunk.

    Creates random node/pair inputs on the GPU, builds the autochunk-wrapped
    model under a fixed memory cap, and times both implementations via the
    common ``_benchmark_evoformer`` helper.
    """
    # init data and model
    msa_len = 300
    pair_len = 800
    node = torch.randn(1, msa_len, pair_len, 256).cuda()
    pair = torch.randn(1, pair_len, pair_len, 128).cuda()
    model = evoformer_base().cuda()

    # build autochunk model under the given memory cap
    max_memory = 3000  # MB
    autochunk = _build_autochunk(model, max_memory, node, pair)

    # run both implementations through the shared benchmark harness
    for impl, title in ((model, "openfold"), (autochunk, "autochunk")):
        _benchmark_evoformer(impl, node, pair, title)
# Script entry point: run the benchmark only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    benchmark_evoformer()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment