Unverified Commit 1ffd1974 authored by Jennifer Wei's avatar Jennifer Wei Committed by GitHub
Browse files

Merge pull request #533 from jnwei/pl_upgrades

Update pl_upgrades to use numpy 2 and update other dependencies
parents 23cf2f61 620a54fb
......@@ -14,7 +14,7 @@ gunzip -c tests/test_data/sample_feats.pickle.gz > tests/test_data/sample_feats.
python setup.py install
echo "Download CUTLASS, required for Deepspeed Evoformer attention kernel"
git clone https://github.com/NVIDIA/cutlass --depth 1
git clone https://github.com/NVIDIA/cutlass --branch v3.6.0 --depth 1
conda env config vars set CUTLASS_PATH=$PWD/cutlass
# This setting is used to fix a worker assignment issue during data loading
......
......@@ -54,6 +54,7 @@ def get_cuda_bare_metal_version(cuda_dir):
compute_capabilities = set([
(5, 2), # Titan X
(6, 1), # GeForce 1000-series
(9, 0), # Hopper
])
compute_capabilities.add((7, 0))
......@@ -112,7 +113,7 @@ else:
setup(
name='openfold',
version='2.0.0',
version='2.2.0',
description='A PyTorch reimplementation of DeepMind\'s AlphaFold 2',
author='OpenFold Team',
author_email='jennifer.wei@omsf.io',
......
......@@ -315,8 +315,9 @@ class TestDeepSpeedKernel(unittest.TestCase):
# Move the recycling dimension to the end
move_dim = lambda t: t.permute(*range(len(t.shape))[1:], 0)
batch = tensor_tree_map(move_dim, batch)
with torch.no_grad():
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
# Restrict this test to use only torch.float32 precision due to instability with torch.bfloat16
# https://github.com/aqlaboratory/openfold/issues/532
with torch.no_grad(), torch.cuda.amp.autocast(dtype=torch.float32):
model = compare_utils.get_global_pretrained_openfold()
model.globals.use_deepspeed_evo_attention = False
out_repro = model(batch)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment