OpenDAS / OpenFold
"tools/cfgs/git@developer.sourcefind.cn:OpenDAS/openpcdet.git" did not exist on "c7f6de362a2f5aa914a567c55922d56c8850bcfe"
Commit 5f5a79a7, authored Jan 12, 2024 by Jennifer

initial compatibility changes for upgrading multimer

Parent: 6ba0a594
Showing 4 changed files with 16 additions and 15 deletions:
environment.yml                  +10 -9
openfold/data/data_pipeline.py    +3 -3
openfold/model/primitives.py      +2 -2
setup.py                          +1 -1
environment.yml (+10 -9)

```diff
@@ -3,6 +3,7 @@ channels:
   - conda-forge
   - bioconda
   - pytorch
+  - nvidia
 dependencies:
   - python=3.9
   - libgcc=7.2
@@ -10,17 +11,16 @@ dependencies:
   - pip
   - openmm=7.7
   - pdbfixer
-  - cudatoolkit==11.3.*
-  - pytorch-lightning==1.5.10
+  - pytorch-lightning
   - biopython==1.79
-  - numpy==1.21
-  - pandas==2.0
+  - numpy
+  - pandas
   - PyYAML==5.4.1
   - requests
-  - scipy==1.7
+  - scipy
   - tqdm==4.62.2
-  - typing-extensions==3.10
-  - wandb==0.12.21
+  - typing-extensions
+  - wandb
   - modelcif==0.7
   - awscli
   - ml-collections
@@ -29,9 +29,10 @@ dependencies:
   - bioconda::hmmer==3.3.2
   - bioconda::hhsuite==3.3.0
   - bioconda::kalign2==2.04
-  - pytorch::pytorch=1.12.*
+  - pytorch::pytorch=2.1
+  - pytorch::pytorch-cuda=12.1
   - pip:
       - deepspeed==0.12.4
       - dm-tree==0.1.6
       - git+https://github.com/NVIDIA/dllogger.git
-      - git+https://github.com/Dao-AILab/flash-attention.git@5b838a8
+      - flash-attn
```
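The environment changes above drop the cudatoolkit 11.3 / pytorch 1.12 stack in favor of pytorch 2.1 with pytorch-cuda 12.1 from the new nvidia channel, relax several version pins, and install flash-attn from PyPI instead of a pinned git checkout. A quick way to confirm the rebuilt environment resolved as intended might look like the sketch below; it is an illustration, not part of the commit.

```python
# Illustrative post-install check (not part of this commit): confirm the
# upgraded stack resolved to the intended versions before running OpenFold.
import torch
import flash_attn  # installed from PyPI as "flash-attn"

print(torch.__version__)          # expect a 2.1.x build
print(torch.version.cuda)         # expect "12.1"
print(torch.cuda.is_available())  # True on a CUDA-capable machine
print(flash_attn.__version__)
```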
openfold/data/data_pipeline.py (+3 -3)

```diff
@@ -244,7 +244,7 @@ def make_msa_features(msas: Sequence[parsers.Msa]) -> FeatureDict:
     features["num_alignments"] = np.array(
         [num_alignments] * num_res, dtype=np.int32
     )
-    features["msa_species_identifiers"] = np.array(species_ids, dtype=np.object_)
+    features["msa_species_identifiers"] = np.array(species_ids, dtype=object)
     return features
@@ -590,7 +590,7 @@ def convert_monomer_features(
 ) -> FeatureDict:
     """Reshapes and modifies monomer features for multimer models."""
     converted = {}
-    converted['auth_chain_id'] = np.asarray(chain_id, dtype=np.object_)
+    converted['auth_chain_id'] = np.asarray(chain_id, dtype=object)
     unnecessary_leading_dim_feats = {
         'sequence', 'domain_name', 'num_alignments', 'seq_length'
     }
@@ -1296,7 +1296,7 @@ class DataPipelineMultimer:
         )
         mmcif_feats["release_date"] = np.array(
-            [mmcif_object.header["release_date"].encode("utf-8")], dtype=np.object_
+            [mmcif_object.header["release_date"].encode("utf-8")], dtype=object
         )
         mmcif_feats["is_distillation"] = np.array(0., dtype=np.float32)
```
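All three edits in data_pipeline.py swap dtype=np.object_ for the builtin object. NumPy maps both to the same object dtype, so the change is behavior-preserving; the alias removed in newer NumPy releases is np.object, while np.object_ still exists. A minimal illustration of the equivalence (not code from the repo):

```python
# Illustration only: dtype=np.object_ and dtype=object resolve to the same
# NumPy dtype ('O'), so replacing one with the other does not change behavior.
import numpy as np

a = np.array(["MKTAY", "MLSDE"], dtype=np.object_)  # made-up example strings
b = np.array(["MKTAY", "MLSDE"], dtype=object)
assert a.dtype == b.dtype == np.dtype("O")
```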
openfold/model/primitives.py (+2 -2)

```diff
@@ -28,7 +28,7 @@ if ds4s_is_installed:
 fa_is_installed = importlib.util.find_spec("flash_attn") is not None
 if fa_is_installed:
     from flash_attn.bert_padding import unpad_input
-    from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func
+    from flash_attn.flash_attn_interface import flash_attn_varlen_kvpacked_func
 import torch
 import torch.nn as nn
@@ -811,7 +811,7 @@ def _flash_attn(q, k, v, kv_mask):
     kv_unpad, _, kv_cu_seqlens, kv_max_s = unpad_input(kv, kv_mask)
     kv_unpad = kv_unpad.reshape(-1, *kv_shape[-3:])
-    out = flash_attn_unpadded_kvpacked_func(
+    out = flash_attn_varlen_kvpacked_func(
         q,
         kv_unpad,
         q_cu_seqlens,
```
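The primitives.py edits track the flash-attn 2.x API rename from flash_attn_unpadded_* to flash_attn_varlen_*; both the import and the call site in _flash_attn move to the new name. If older flash-attn releases also had to keep working, a guarded import along these lines could bridge the two names; this is a sketch, not what the commit does.

```python
# Hypothetical compatibility shim (not in this commit): accept either the
# pre-2.x "unpadded" name or the 2.x "varlen" name of the kvpacked kernel.
try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_kvpacked_func
except ImportError:
    from flash_attn.flash_attn_interface import (
        flash_attn_unpadded_kvpacked_func as flash_attn_varlen_kvpacked_func,
    )
```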
setup.py (+1 -1)

```diff
@@ -29,7 +29,7 @@ version_dependent_macros = [
 ]
 extra_cuda_flags = [
-    '-std=c++14',
+    '-std=c++17',
     '-maxrregcount=50',
     '-U__CUDA_NO_HALF_OPERATORS__',
     '-U__CUDA_NO_HALF_CONVERSIONS__',
```
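PyTorch 2.x requires C++17 for its C++/CUDA extensions, so the nvcc flag list moves from -std=c++14 to -std=c++17 to keep the custom kernels compiling against the upgraded torch. For context, flags like these are typically fed to a torch CUDAExtension roughly as sketched below; the extension name and source files are placeholders, not OpenFold's actual setup.py.

```python
# Rough sketch of how extra CUDA flags reach a torch CUDAExtension build.
# The extension name and sources are placeholders, not the real setup.py.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

extra_cuda_flags = ["-std=c++17", "-maxrregcount=50"]

setup(
    name="example_cuda_ext",
    ext_modules=[
        CUDAExtension(
            name="example_cuda_ext",
            sources=["example.cpp", "example_kernel.cu"],
            extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3"] + extra_cuda_flags},
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
```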