Commit ac058f54 authored by Gustaf Ahdritz

Add some more dependencies

parent 8cb569f2
@@ -23,8 +23,10 @@ in the future.
 ## Installation (Linux)
 Python dependencies available through `pip` are provided in `requirements.txt`.
-OpenFold also depends on `openmm==7.5.1` and `pdbfixer`, which are only
-available via `conda`.
+OpenFold depends on `openmm==7.5.1` and `pdbfixer`, which are only available
+via `conda`. For producing sequence alignments, you'll also need `jackhmmer`,
+`kalign`, and the [HH-Suite](https://github.com/soedinglab/hh-suite) installed
+on your system.
 For convenience, we provide a script that installs Miniconda locally, creates a
 `conda` virtual environment, installs all Python dependencies, and downloads
...
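To make the new requirements concrete, here is a small, hypothetical sanity-check script (not part of this commit) that verifies the `conda`-only packages import and that the alignment tools are on `PATH`; `hhblits` is assumed here as the relevant HH-Suite binary.

```python
# Hypothetical helper, not part of this commit: check that the dependencies
# described in the README are present in the current environment.
import importlib.util
import shutil

def check_openfold_dependencies():
    # conda-only Python packages named in the README
    for module in ("openmm", "pdbfixer"):
        if importlib.util.find_spec(module) is None:
            print(f"Missing Python package: {module}")

    # external alignment tools; "hhblits" is an assumption for the HH-Suite binary
    for binary in ("jackhmmer", "kalign", "hhblits"):
        if shutil.which(binary) is None:
            print(f"Missing executable on PATH: {binary}")

if __name__ == "__main__":
    check_openfold_dependencies()
```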
@@ -349,7 +349,7 @@ class EvoformerStack(nn.Module):
             z:
                 [*, N_res, N_res, C_z] pair embedding
             s:
-                [*, N_res, C_s] single embedding
+                [*, N_res, C_s] single embedding (or None if extra MSA stack)
         """
         m, z = checkpoint_blocks(
             blocks=[
...
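For context on the `checkpoint_blocks` call in the hunk above: it appears to run the Evoformer blocks under activation (gradient) checkpointing, so intermediate activations are recomputed during the backward pass instead of being kept in memory. Below is a minimal sketch of that idea using plain `torch.utils.checkpoint`; the helper name and the `(m, z) -> (m, z)` block signature are assumptions for illustration, not OpenFold's actual implementation.

```python
import torch
from torch.utils.checkpoint import checkpoint

def run_blocks_with_checkpointing(blocks, m, z):
    # Sketch only: apply each block under activation checkpointing so that the
    # large (m, z) activations are recomputed on the backward pass rather than
    # stored. OpenFold's checkpoint_blocks may group several blocks per
    # checkpoint; this shows the one-block-per-checkpoint case.
    for block in blocks:
        if torch.is_grad_enabled():
            m, z = checkpoint(block, m, z)
        else:
            m, z = block(m, z)
    return m, z
```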
@@ -97,11 +97,15 @@ class MSAAttention(nn.Module):
         # [*, N_seq, 1, 1, N_res]
         bias = (self.inf * (mask - 1))[..., :, None, None, :]
+        # This step simply returns a larger view of the bias, and does not
+        # consume additional memory.
         # [*, N_seq, no_heads, N_res, N_res]
         bias = bias.expand(
             ((-1,) * len(bias.shape[:-4])) + (-1, self.no_heads, n_res, -1)
         )
         biases = [bias]
         if self.pair_bias:
             # [*, N_res, N_res, C_z]
             z = self.layer_norm_z(z)
...
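The comment added in this hunk notes that `Tensor.expand` only returns a broadcasted view. A standalone illustration of that property (plain PyTorch with made-up sizes, nothing OpenFold-specific):

```python
import torch

# The mask bias has shape [*, N_seq, 1, 1, N_res]; expand() broadcasts the
# size-1 dimensions by creating a view with stride 0, so no extra memory is
# allocated for the larger [*, N_seq, no_heads, N_res, N_res] tensor.
inf, no_heads, n_seq, n_res = 1e9, 8, 4, 5
mask = torch.ones(1, n_seq, 1, 1, n_res)
bias = inf * (mask - 1)

expanded = bias.expand(-1, -1, no_heads, n_res, -1)
print(expanded.shape)                           # torch.Size([1, 4, 8, 5, 5])
print(expanded.data_ptr() == bias.data_ptr())   # True: same underlying storage
print(expanded.stride())                        # zero strides along the expanded dims
```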
@@ -207,7 +207,7 @@ class Attention(nn.Module):
         self.gating = gating
         # DISCREPANCY: c_hidden is not the per-head channel dimension, as
-        # stated in the supplement, but the overall channel dimension
+        # stated in the supplement, but the overall channel dimension.
         self.linear_q = Linear(
             self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot"
@@ -271,9 +271,11 @@ class Attention(nn.Module):
         norm = 1 / math.sqrt(self.c_hidden)  # [1]
         a *= norm
         if biases is not None:
             for b in biases:
                 a = a + b
         a = self.softmax(a)
         # [*, H, V, C_hidden]
...
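For context on the loop above: every entry of `biases` (the mask bias, and the pair bias when enabled) is simply added to the raw attention scores before the softmax. A condensed, self-contained sketch of that pattern follows; the function name and tensor shapes are illustrative, not OpenFold's exact code.

```python
import math
import torch

def biased_attention(q, k, v, biases=None):
    # Sketch of the pattern above: scaled dot-product attention in which each
    # additive bias (e.g. -inf at masked positions, or a pair bias) is summed
    # into the logits before the softmax.
    a = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(q.shape[-1])
    if biases is not None:
        for b in biases:
            a = a + b
    a = torch.softmax(a, dim=-1)
    return torch.matmul(a, v)

# Example: mask out the last key position for every query.
q = torch.randn(2, 8, 10, 16)   # [batch, heads, queries, channels]
k = torch.randn(2, 8, 10, 16)
v = torch.randn(2, 8, 10, 16)
mask = torch.ones(2, 1, 1, 10)
mask[..., -1] = 0
mask_bias = 1e9 * (mask - 1)    # 0 where kept, -1e9 where masked
out = biased_attention(q, k, v, biases=[mask_bias])
```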