Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
OpenFold
Commits
54d414e4
Commit
54d414e4
authored
Nov 10, 2023
by
Christina Floristean
Browse files
Return to regular kernel usage
parent
b7f35dce
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
7 additions
and
8 deletions
+7
-8
environment.yml
environment.yml
+1
-1
openfold/model/primitives.py
openfold/model/primitives.py
+6
-7
No files found.
environment.yml
View file @
54d414e4
...
@@ -30,7 +30,7 @@ dependencies:
...
@@ -30,7 +30,7 @@ dependencies:
-
bioconda::kalign2==2.04
-
bioconda::kalign2==2.04
-
pytorch::pytorch=1.12.*
-
pytorch::pytorch=1.12.*
-
pip
:
-
pip
:
-
deepspeed==0.12.2
-
git+https://github.com/microsoft/DeepSpeed.git@4388a60
# Replace when version becomes available
-
dm-tree==0.1.6
-
dm-tree==0.1.6
-
git+https://github.com/NVIDIA/dllogger.git
-
git+https://github.com/NVIDIA/dllogger.git
-
git+https://github.com/Dao-AILab/flash-attention.git@5b838a8
-
git+https://github.com/Dao-AILab/flash-attention.git@5b838a8
openfold/model/primitives.py
View file @
54d414e4
...
@@ -23,7 +23,7 @@ if deepspeed_is_installed:
...
@@ -23,7 +23,7 @@ if deepspeed_is_installed:
import
deepspeed
import
deepspeed
if
ds4s_is_installed
:
if
ds4s_is_installed
:
from
deepspeed.ops.deepspeed4science
import
EvoformerFusedAttention
from
deepspeed.ops.deepspeed4science
import
DS4Sci_EvoformerAttention
fa_is_installed
=
importlib
.
util
.
find_spec
(
"flash_attn"
)
is
not
None
fa_is_installed
=
importlib
.
util
.
find_spec
(
"flash_attn"
)
is
not
None
if
fa_is_installed
:
if
fa_is_installed
:
...
@@ -661,19 +661,18 @@ def _deepspeed_evo_attn(
...
@@ -661,19 +661,18 @@ def _deepspeed_evo_attn(
v
=
reshape_dims
(
v
)
v
=
reshape_dims
(
v
)
biases
=
[
reshape_dims
(
b
)
for
b
in
biases
]
biases
=
[
reshape_dims
(
b
)
for
b
in
biases
]
biases
.
extend
([
None
]
*
(
2
-
len
(
biases
)))
# DeepSpeed attn. kernel requires inputs to be type bf16 or fp16
# DeepSpeed attn. kernel requires inputs to be type bf16 or fp16
# Cast to bf16 so kernel can be used during inference
# Cast to bf16 so kernel can be used during inference
orig_dtype
=
q
.
dtype
orig_dtype
=
q
.
dtype
if
orig_dtype
not
in
[
torch
.
bfloat16
,
torch
.
float16
]:
if
orig_dtype
not
in
[
torch
.
bfloat16
,
torch
.
float16
]:
inputs_bf16
=
[
x
.
to
(
dtype
=
torch
.
bfloat16
)
if
x
is
not
None
else
x
o
=
DS4Sci_EvoformerAttention
(
q
.
to
(
dtype
=
torch
.
bfloat16
),
for
x
in
(
q
,
k
,
v
,
biases
[
0
],
biases
[
1
])]
k
.
to
(
dtype
=
torch
.
bfloat16
),
o
=
EvoformerFusedAttention
.
apply
(
*
inputs_bf16
)
v
.
to
(
dtype
=
torch
.
bfloat16
),
[
b
.
to
(
dtype
=
torch
.
bfloat16
)
for
b
in
biases
])
o
=
o
.
to
(
dtype
=
orig_dtype
)
o
=
o
.
to
(
dtype
=
orig_dtype
)
else
:
else
:
o
=
EvoformerFusedAttention
.
apply
(
q
,
k
,
v
,
biases
[
0
],
biases
[
1
]
)
o
=
DS4Sci_EvoformerAttention
(
q
,
k
,
v
,
biases
)
o
=
o
.
reshape
(
orig_shape
)
o
=
o
.
reshape
(
orig_shape
)
return
o
return
o
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment