Commit ac058f54 authored by Gustaf Ahdritz

Add some more dependencies

parent 8cb569f2
@@ -23,8 +23,10 @@ in the future.
 ## Installation (Linux)
 Python dependencies available through `pip` are provided in `requirements.txt`.
-OpenFold also depends on `openmm==7.5.1` and `pdbfixer`, which are only
-available via `conda`.
+OpenFold depends on `openmm==7.5.1` and `pdbfixer`, which are only available
+via `conda`. For producing sequence alignments, you'll also need `jackhmmer`,
+`kalign`, and the [HH-Suite](https://github.com/soedinglab/hh-suite) installed
+on your system.
 For convenience, we provide a script that installs Miniconda locally, creates a
 `conda` virtual environment, installs all Python dependencies, and downloads
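A quick way to confirm these dependencies are visible from the active environment is a small check along these lines (illustrative only, not part of the repository's install script; `hhblits`/`hhsearch` stand in for the HH-Suite binaries):

```python
import shutil
from importlib.util import find_spec

def has_module(name):
    # find_spec raises if a dotted parent package is missing, so guard it
    try:
        return find_spec(name) is not None
    except ModuleNotFoundError:
        return False

# openmm 7.5.x is importable as `simtk.openmm`; newer releases expose `openmm`
for module in ("pdbfixer", "openmm", "simtk.openmm"):
    print(f"{module}: {'found' if has_module(module) else 'missing'}")

# Alignment tools must be on PATH; hhblits/hhsearch ship with the HH-Suite
for binary in ("jackhmmer", "kalign", "hhblits", "hhsearch"):
    path = shutil.which(binary)
    print(f"{binary}: {path or 'missing from PATH'}")
```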
@@ -349,7 +349,7 @@ class EvoformerStack(nn.Module):
             z:
                 [*, N_res, N_res, C_z] pair embedding
             s:
-                [*, N_res, C_s] single embedding
+                [*, N_res, C_s] single embedding (or None if extra MSA stack)
         """
         m, z = checkpoint_blocks(
             blocks=[
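The `checkpoint_blocks` call above trades compute for memory by checkpointing activations block by block. A minimal sketch of that idea using `torch.utils.checkpoint` (toy blocks, not OpenFold's actual implementation):

```python
import torch
from torch.utils.checkpoint import checkpoint

def run_blocks_with_checkpointing(blocks, *args):
    # Run blocks sequentially; each block's intermediate activations are
    # recomputed during the backward pass instead of being kept in memory.
    for block in blocks:
        args = checkpoint(block, *args)
        if not isinstance(args, tuple):
            args = (args,)
    return args

blocks = [torch.nn.Linear(16, 16) for _ in range(3)]
(out,) = run_blocks_with_checkpointing(
    blocks, torch.randn(4, 16, requires_grad=True)
)
out.sum().backward()
```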
@@ -97,11 +97,15 @@ class MSAAttention(nn.Module):
         # [*, N_seq, 1, 1, N_res]
         bias = (self.inf * (mask - 1))[..., :, None, None, :]
+        # This step simply returns a larger view of the bias, and does not
+        # consume additional memory.
         # [*, N_seq, no_heads, N_res, N_res]
         bias = bias.expand(
             ((-1,) * len(bias.shape[:-4])) + (-1, self.no_heads, n_res, -1)
         )
         biases = [bias]
         if self.pair_bias:
             # [*, N_res, N_res, C_z]
             z = self.layer_norm_z(z)
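The no-extra-memory claim in the new comment is easy to verify: `Tensor.expand` returns a broadcasted view over the same storage. A toy check (the shapes are illustrative, not taken from the model):

```python
import torch

no_heads, n_seq, n_res = 8, 4, 16
bias = torch.randn(n_seq, 1, 1, n_res)                 # [N_seq, 1, 1, N_res]
expanded = bias.expand(n_seq, no_heads, n_res, n_res)  # [N_seq, H, N_res, N_res]

# Same underlying storage, so no additional memory is allocated
assert expanded.data_ptr() == bias.data_ptr()
# Broadcast dimensions are realized with stride 0 rather than copies
assert expanded.stride(1) == 0 and expanded.stride(2) == 0
```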
@@ -190,7 +194,7 @@ class MSAColumnAttention(MSAAttention):
                 [*, N_seq, N_res, C_m] MSA embedding
             mask:
                 [*, N_seq, N_res] MSA mask
-        """
+        """
         # [*, N_res, N_seq, C_in]
         m = m.transpose(-2, -3)
         if mask is not None:
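The transpose above is the whole trick behind `MSAColumnAttention`: swapping the sequence and residue axes lets per-column attention reuse the row-attention machinery. A toy illustration (a simplified stand-in for the real attention module, not OpenFold code):

```python
import math
import torch

def row_self_attention(m):
    # Minimal stand-in for row attention: self-attention over the
    # second-to-last axis of a [rows, cols, C] tensor.
    a = torch.matmul(m, m.transpose(-1, -2)) / math.sqrt(m.shape[-1])
    return torch.matmul(torch.softmax(a, dim=-1), m)

msa = torch.randn(5, 12, 8)  # [N_seq, N_res, C_m]

# Column attention: transpose so columns become "rows", run the same routine,
# then transpose back. Each residue position now attends over the sequences.
col_attn = row_self_attention(msa.transpose(-2, -3)).transpose(-2, -3)
assert col_attn.shape == msa.shape
```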
@@ -207,7 +207,7 @@ class Attention(nn.Module):
         self.gating = gating
         # DISCREPANCY: c_hidden is not the per-head channel dimension, as
-        # stated in the supplement, but the overall channel dimension
+        # stated in the supplement, but the overall channel dimension.
         self.linear_q = Linear(
             self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot"
@@ -271,9 +271,11 @@ class Attention(nn.Module):
         norm = 1 / math.sqrt(self.c_hidden)  # [1]
         a *= norm
         if biases is not None:
             for b in biases:
                 a = a + b
         a = self.softmax(a)
         # [*, H, V, C_hidden]
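For reference, the surrounding logic is plain scaled dot-product attention with an arbitrary list of additive biases folded in before the softmax. A self-contained toy version of that pattern (illustrative names and shapes, not the module's API):

```python
import math
import torch

def biased_attention_weights(q, k, biases=None):
    # q, k: [*, H, Q, C_hidden] / [*, H, K, C_hidden]
    a = torch.matmul(q, k.transpose(-1, -2))   # [*, H, Q, K]
    a *= 1 / math.sqrt(q.shape[-1])            # scale by the hidden dim used for q/k
    if biases is not None:
        for b in biases:                       # each bias broadcasts against [*, H, Q, K]
            a = a + b
    return torch.softmax(a, dim=-1)

q = torch.randn(2, 4, 10, 16)
k = torch.randn(2, 4, 10, 16)
mask_bias = torch.zeros(2, 1, 1, 10)  # e.g. large negative values at masked positions
w = biased_attention_weights(q, k, [mask_bias])
assert torch.allclose(w.sum(dim=-1), torch.ones(2, 4, 10))
```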