Commit 0be2b30b authored by Augustin-Zidek

Add code for AlphaFold-Multimer.

PiperOrigin-RevId: 407076987
parent 1d43aaff
@@ -13,72 +13,118 @@
# limitations under the License.
"""A collection of common Haiku modules for use in protein folding."""
import numbers
from typing import Union, Sequence

import haiku as hk
import jax.numpy as jnp
import numpy as np

# Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
TRUNCATED_NORMAL_STDDEV_FACTOR = np.asarray(.87962566103423978,
                                            dtype=np.float32)
def get_initializer_scale(initializer_name, input_shape):
"""Get Initializer for weights and scale to multiply activations by."""
if initializer_name == 'zeros':
w_init = hk.initializers.Constant(0.0)
else:
# fan-in scaling
scale = 1.
for channel_dim in input_shape:
scale /= channel_dim
if initializer_name == 'relu':
scale *= 2
noise_scale = scale
stddev = np.sqrt(noise_scale)
# Adjust stddev for truncation.
stddev = stddev / TRUNCATED_NORMAL_STDDEV_FACTOR
w_init = hk.initializers.TruncatedNormal(mean=0.0, stddev=stddev)
return w_init
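# A worked example of the scaling above (illustrative values, not used below):
# for a 'relu' initializer and fan-in 128, the raw He variance is 2/128, so the
# truncated normal is drawn with stddev sqrt(2/128) / 0.87962566 ~= 0.1421,
# which restores the intended spread after cutting the tails at +/- 2 sigma.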
class Linear(hk.Module):
  """Protein folding specific Linear module.

  This differs from the standard Haiku Linear in a few ways:
    * It supports inputs and outputs of arbitrary rank
    * Initializers are specified by strings
  """

  def __init__(self,
               num_output: Union[int, Sequence[int]],
               initializer: str = 'linear',
               num_input_dims: int = 1,
               use_bias: bool = True,
               bias_init: float = 0.,
               precision=None,
               name: str = 'linear'):
"""Constructs Linear Module. """Constructs Linear Module.
Args: Args:
num_output: number of output channels. num_output: Number of output channels. Can be tuple when outputting
multiple dimensions.
initializer: What initializer to use, should be one of {'linear', 'relu', initializer: What initializer to use, should be one of {'linear', 'relu',
'zeros'} 'zeros'}
num_input_dims: Number of dimensions from the end to project.
use_bias: Whether to include trainable bias use_bias: Whether to include trainable bias
bias_init: Value used to initialize bias. bias_init: Value used to initialize bias.
name: name of module, used for name scopes. precision: What precision to use for matrix multiplication, defaults
to None.
name: Name of module, used for name scopes.
""" """
    super().__init__(name=name)
    if isinstance(num_output, numbers.Integral):
      self.output_shape = (num_output,)
    else:
      self.output_shape = tuple(num_output)
    self.initializer = initializer
    self.use_bias = use_bias
    self.bias_init = bias_init
    self.num_input_dims = num_input_dims
    self.num_output_dims = len(self.output_shape)
    self.precision = precision
  def __call__(self, inputs):
    """Connects Module.

    Args:
      inputs: Tensor with at least num_input_dims dimensions.

    Returns:
      output of shape [...] + num_output.
    """
    num_input_dims = self.num_input_dims

    if self.num_input_dims > 0:
      in_shape = inputs.shape[-self.num_input_dims:]
    else:
      in_shape = ()

    weight_init = get_initializer_scale(self.initializer, in_shape)

    in_letters = 'abcde'[:self.num_input_dims]
    out_letters = 'hijkl'[:self.num_output_dims]
    weight_shape = in_shape + self.output_shape
    weights = hk.get_parameter('weights', weight_shape, inputs.dtype,
                               weight_init)

    equation = f'...{in_letters}, {in_letters}{out_letters}->...{out_letters}'

    output = jnp.einsum(equation, inputs, weights, precision=self.precision)

    if self.use_bias:
      bias = hk.get_parameter('bias', self.output_shape, inputs.dtype,
                              hk.initializers.Constant(self.bias_init))
      output += bias

    return output
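# A minimal usage sketch (hypothetical shapes; must run inside hk.transform):
# with num_input_dims=2 the trailing two dims are projected, and the module
# builds the einsum '...ab, abhi->...hi' internally.
def _linear_example(x: jnp.ndarray) -> jnp.ndarray:
  # x: [num_res, 8, 32] -> [num_res, 4, 16]
  return Linear((4, 16), num_input_dims=2, name='linear_example')(x)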
@@ -17,7 +17,6 @@ import copy
from alphafold.model.tf import shape_placeholders
import ml_collections

NUM_RES = shape_placeholders.NUM_RES
NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ
NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ
@@ -27,6 +26,9 @@ NUM_TEMPLATES = shape_placeholders.NUM_TEMPLATES
def model_config(name: str) -> ml_collections.ConfigDict:
  """Get the ConfigDict of a CASP14 model."""
  if 'multimer' in name:
    return CONFIG_MULTIMER
  if name not in CONFIG_DIFFS:
    raise ValueError(f'Invalid model name {name}.')
  cfg = copy.deepcopy(CONFIG)
@@ -34,6 +36,32 @@ def model_config(name: str) -> ml_collections.ConfigDict:
  return cfg
MODEL_PRESETS = {
'monomer': (
'model_1',
'model_2',
'model_3',
'model_4',
'model_5',
),
'monomer_ptm': (
'model_1_ptm',
'model_2_ptm',
'model_3_ptm',
'model_4_ptm',
'model_5_ptm',
),
'multimer': (
'model_1_multimer',
'model_2_multimer',
'model_3_multimer',
'model_4_multimer',
'model_5_multimer',
),
}
MODEL_PRESETS['monomer_casp14'] = MODEL_PRESETS['monomer']
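# A minimal usage sketch (illustrative): names containing 'multimer' resolve
# to CONFIG_MULTIMER; everything else goes through CONFIG + CONFIG_DIFFS.
def _config_example() -> ml_collections.ConfigDict:
  return model_config(MODEL_PRESETS['multimer'][0])  # 'model_1_multimer'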
CONFIG_DIFFS = {
    'model_1': {
        # Jumper et al. (2021) Suppl. Table 5, Model 1.1.1
@@ -206,6 +234,7 @@ CONFIG = ml_collections.ConfigDict({
            'shared_dropout': True
        },
        'outer_product_mean': {
            'first': False,
            'chunk_size': 128,
            'dropout_rate': 0.0,
            'num_outer_channel': 32,
@@ -322,6 +351,7 @@ CONFIG = ml_collections.ConfigDict({
        },
        'global_config': {
            'deterministic': False,
            'multimer_mode': False,
            'subbatch_size': 4,
            'use_remat': False,
            'zero_init': True
@@ -400,3 +430,228 @@ CONFIG = ml_collections.ConfigDict({
        'resample_msa_in_recycling': True
    },
})
CONFIG_MULTIMER = ml_collections.ConfigDict({
'model': {
'embeddings_and_evoformer': {
'evoformer_num_block': 48,
'evoformer': {
'msa_column_attention': {
'dropout_rate': 0.0,
'gating': True,
'num_head': 8,
'orientation': 'per_column',
'shared_dropout': True
},
'msa_row_attention_with_pair_bias': {
'dropout_rate': 0.15,
'gating': True,
'num_head': 8,
'orientation': 'per_row',
'shared_dropout': True
},
'msa_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'outer_product_mean': {
'chunk_size': 128,
'dropout_rate': 0.0,
'first': True,
'num_outer_channel': 32,
'orientation': 'per_row',
'shared_dropout': True
},
'pair_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_attention_ending_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_column',
'shared_dropout': True
},
'triangle_attention_starting_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_multiplication_incoming': {
'dropout_rate': 0.25,
'equation': 'kjc,kic->ijc',
'num_intermediate_channel': 128,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_multiplication_outgoing': {
'dropout_rate': 0.25,
'equation': 'ikc,jkc->ijc',
'num_intermediate_channel': 128,
'orientation': 'per_row',
'shared_dropout': True
}
},
'extra_msa_channel': 64,
'extra_msa_stack_num_block': 4,
'num_msa': 252,
'num_extra_msa': 1152,
'masked_msa': {
'profile_prob': 0.1,
'replace_fraction': 0.15,
'same_prob': 0.1,
'uniform_prob': 0.1
},
'use_chain_relative': True,
'max_relative_chain': 2,
'max_relative_idx': 32,
'seq_channel': 384,
'msa_channel': 256,
'pair_channel': 128,
'prev_pos': {
'max_bin': 20.75,
'min_bin': 3.25,
'num_bins': 15
},
'recycle_features': True,
'recycle_pos': True,
'template': {
'attention': {
'gating': False,
'num_head': 4
},
'dgram_features': {
'max_bin': 50.75,
'min_bin': 3.25,
'num_bins': 39
},
'enabled': True,
'max_templates': 4,
'num_channels': 64,
'subbatch_size': 128,
'template_pair_stack': {
'num_block': 2,
'pair_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 2,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_attention_ending_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_column',
'shared_dropout': True
},
'triangle_attention_starting_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_multiplication_incoming': {
'dropout_rate': 0.25,
'equation': 'kjc,kic->ijc',
'num_intermediate_channel': 64,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_multiplication_outgoing': {
'dropout_rate': 0.25,
'equation': 'ikc,jkc->ijc',
'num_intermediate_channel': 64,
'orientation': 'per_row',
'shared_dropout': True
}
}
},
},
'global_config': {
'deterministic': False,
'multimer_mode': True,
'subbatch_size': 4,
'use_remat': False,
'zero_init': True
},
'heads': {
'distogram': {
'first_break': 2.3125,
'last_break': 21.6875,
'num_bins': 64,
'weight': 0.3
},
'experimentally_resolved': {
'filter_by_resolution': True,
'max_resolution': 3.0,
'min_resolution': 0.1,
'weight': 0.01
},
'masked_msa': {
'weight': 2.0
},
'predicted_aligned_error': {
'filter_by_resolution': True,
'max_error_bin': 31.0,
'max_resolution': 3.0,
'min_resolution': 0.1,
'num_bins': 64,
'num_channels': 128,
'weight': 0.1
},
'predicted_lddt': {
'filter_by_resolution': True,
'max_resolution': 3.0,
'min_resolution': 0.1,
'num_bins': 50,
'num_channels': 128,
'weight': 0.01
},
'structure_module': {
'angle_norm_weight': 0.01,
'chi_weight': 0.5,
'clash_overlap_tolerance': 1.5,
'dropout': 0.1,
'interface_fape': {
'atom_clamp_distance': 1000.0,
'loss_unit_distance': 20.0
},
'intra_chain_fape': {
'atom_clamp_distance': 10.0,
'loss_unit_distance': 10.0
},
'num_channel': 384,
'num_head': 12,
'num_layer': 8,
'num_layer_in_transition': 3,
'num_point_qk': 4,
'num_point_v': 8,
'num_scalar_qk': 16,
'num_scalar_v': 16,
'position_scale': 20.0,
'sidechain': {
'atom_clamp_distance': 10.0,
'loss_unit_distance': 10.0,
'num_channel': 128,
'num_residual_block': 2,
'weight_frac': 0.5
},
'structural_violation_loss_weight': 1.0,
'violation_tolerance_factor': 12.0,
'weight': 1.0
}
},
'num_ensemble_eval': 1,
'num_recycle': 3,
'resample_msa_in_recycling': True
}
})
@@ -15,8 +15,10 @@
"""Code to generate processed features."""
import copy
from typing import List, Mapping, Tuple
from alphafold.model.tf import input_pipeline
from alphafold.model.tf import proteins_dataset
import ml_collections
import numpy as np
import tensorflow.compat.v1 as tf
...
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules and utilities for the structure module in the multimer system."""
import functools
import numbers
from typing import Any, Dict, Iterable, Mapping, Optional, Tuple, Union
from alphafold.common import residue_constants
from alphafold.model import all_atom_multimer
from alphafold.model import common_modules
from alphafold.model import geometry
from alphafold.model import modules
from alphafold.model import prng
from alphafold.model import utils
from alphafold.model.geometry import utils as geometry_utils
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
EPSILON = 1e-8
Float = Union[float, jnp.ndarray]
def squared_difference(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
"""Computes Squared difference between two arrays."""
return jnp.square(x - y)
def make_backbone_affine(
positions: geometry.Vec3Array,
mask: jnp.ndarray,
aatype: jnp.ndarray,
) -> Tuple[geometry.Rigid3Array, jnp.ndarray]:
"""Make backbone Rigid3Array and mask."""
del aatype
a = residue_constants.atom_order['N']
b = residue_constants.atom_order['CA']
c = residue_constants.atom_order['C']
rigid_mask = (mask[:, a] * mask[:, b] * mask[:, c]).astype(
jnp.float32)
rigid = all_atom_multimer.make_transform_from_reference(
a_xyz=positions[:, a], b_xyz=positions[:, b], c_xyz=positions[:, c])
return rigid, rigid_mask
class QuatRigid(hk.Module):
"""Module for projecting Rigids via a quaternion."""
def __init__(self,
global_config: ml_collections.ConfigDict,
rigid_shape: Union[int, Iterable[int]] = tuple(),
full_quat: bool = False,
init: str = 'zeros',
name: str = 'quat_rigid'):
"""Module projecting a Rigid Object.
    For this module the rotation is parametrized as a quaternion.
    If 'full_quat' is True, a 4-vector is produced for the rotation, which is
    normalized and treated as a quaternion.
    When 'full_quat' is False, a 3-vector is produced and the first component
    of the quaternion is set to 1.
Args:
global_config: Global Config, used to set certain properties of underlying
Linear module, see common_modules.Linear for details.
      rigid_shape: Shape of rigids relative to the shape of activations, e.g.
        when activations have shape (n,) and this is (m,), the output will be
        of shape (n, m).
full_quat: Whether to parametrize rotation using full quaternion.
      init: Initializer to use, see common_modules.Linear for details.
name: Name to use for module.
"""
self.init = init
self.global_config = global_config
if isinstance(rigid_shape, int):
self.rigid_shape = (rigid_shape,)
else:
self.rigid_shape = tuple(rigid_shape)
self.full_quat = full_quat
super(QuatRigid, self).__init__(name=name)
def __call__(self, activations: jnp.ndarray) -> geometry.Rigid3Array:
"""Executes Module.
    This returns a set of rigids with the same shape as activations, projecting
    the channel dimension; rigid_shape controls the trailing dimensions.
    For example, when activations is of shape (12, 5) and rigid_shape is (3, 2)
    the shape of the output rigids will be (12, 3, 2).
    This also supports passing in an empty tuple for rigid_shape, in which case
    the example would produce rigids of shape (12,).
Args:
activations: Activations to use for projection, shape [..., num_channel]
Returns:
Rigid transformations with shape [...] + rigid_shape
"""
if self.full_quat:
rigid_dim = 7
else:
rigid_dim = 6
linear_dims = self.rigid_shape + (rigid_dim,)
rigid_flat = common_modules.Linear(
linear_dims,
initializer=self.init,
precision=jax.lax.Precision.HIGHEST,
name='rigid')(
activations)
rigid_flat = geometry_utils.unstack(rigid_flat)
if self.full_quat:
qw, qx, qy, qz = rigid_flat[:4]
translation = rigid_flat[4:]
else:
qx, qy, qz = rigid_flat[:3]
qw = jnp.ones_like(qx)
translation = rigid_flat[3:]
rotation = geometry.Rot3Array.from_quaternion(
qw, qx, qy, qz, normalize=True)
translation = geometry.Vec3Array(*translation)
return geometry.Rigid3Array(rotation, translation)
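# A minimal sketch (hypothetical call, inside hk.transform): with the default
# 'zeros' initializer the Linear emits all-zero (qx, qy, qz, tx, ty, tz), so
# qw = 1 and the module starts from the identity rigid, which iterative
# refinement then perturbs.
def _quat_rigid_example(act: jnp.ndarray) -> geometry.Rigid3Array:
  gc = ml_collections.ConfigDict({'zero_init': True})  # assumed minimal config
  return QuatRigid(gc)(act)  # rigids with shape act.shape[:-1]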
class PointProjection(hk.Module):
"""Given input reprensentation and frame produces points in global frame."""
def __init__(self,
num_points: Union[Iterable[int], int],
global_config: ml_collections.ConfigDict,
return_local_points: bool = False,
name: str = 'point_projection'):
"""Constructs Linear Module.
Args:
      num_points: Number of points to project. Can be tuple when outputting
        multiple dimensions.
global_config: Global Config, passed through to underlying Linear
return_local_points: Whether to return points in local frame as well.
name: name of module, used for name scopes.
"""
if isinstance(num_points, numbers.Integral):
self.num_points = (num_points,)
else:
self.num_points = tuple(num_points)
self.return_local_points = return_local_points
self.global_config = global_config
super().__init__(name=name)
def __call__(
self, activations: jnp.ndarray, rigids: geometry.Rigid3Array
) -> Union[geometry.Vec3Array, Tuple[geometry.Vec3Array, geometry.Vec3Array]]:
output_shape = self.num_points
output_shape = output_shape[:-1] + (3 * output_shape[-1],)
points_local = common_modules.Linear(
output_shape,
precision=jax.lax.Precision.HIGHEST,
name='point_projection')(
activations)
points_local = jnp.split(points_local, 3, axis=-1)
points_local = geometry.Vec3Array(*points_local)
rigids = rigids[(...,) + (None,) * len(output_shape)]
points_global = rigids.apply_to_point(points_local)
if self.return_local_points:
return points_global, points_local
else:
return points_global
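# A shape sketch (assumed values): num_points=[num_head, num_point] creates a
# Linear to (num_head, 3 * num_point); the result is split into x/y/z,
# wrapped as a Vec3Array of local points with shape
# [num_res, num_head, num_point], and mapped through the per-residue rigid
# into the global frame.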
class InvariantPointAttention(hk.Module):
"""Covariant attention module.
The high-level idea is that this attention module works over a set of points
and associated orientations in 3D space (e.g. protein residues).
Each residue outputs a set of queries and keys as points in their local
  reference frame. The attention is then defined as the Euclidean distance
  between the queries and keys in the global frame.
"""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
dist_epsilon: float = 1e-8,
name: str = 'invariant_point_attention'):
"""Initialize.
Args:
config: iterative Fold Head Config
global_config: Global Config of Model.
dist_epsilon: Small value to avoid NaN in distance calculation.
name: Sonnet name.
"""
super().__init__(name=name)
self._dist_epsilon = dist_epsilon
self._zero_initialize_last = global_config.zero_init
self.config = config
self.global_config = global_config
def __call__(
self,
inputs_1d: jnp.ndarray,
inputs_2d: jnp.ndarray,
mask: jnp.ndarray,
rigid: geometry.Rigid3Array,
) -> jnp.ndarray:
"""Compute geometric aware attention.
Given a set of query residues (defined by affines and associated scalar
features), this function computes geometric aware attention between the
query residues and target residues.
    The residues produce points in their local reference frame, which are
    converted into the global frame in order to compute attention via
    Euclidean distance. Equivalently, the target residues produce points in
    their local frame to be used as attention values, which are converted
    into the query residues' local frames.
Args:
inputs_1d: (N, C) 1D input embedding that is the basis for the
scalar queries.
      inputs_2d: (N, M, C') 2D input embedding, used for bias values in the
        attention between query_inputs_1d and target_inputs_1d.
      mask: (N, 1) mask to indicate which elements of query_inputs_1d
        participate in the attention.
rigid: Rigid object describing the position and orientation of
every element in query_inputs_1d.
Returns:
Transformation of the input embedding.
"""
num_head = self.config.num_head
attn_logits = 0.
num_point_qk = self.config.num_point_qk
# Each point pair (q, k) contributes Var [0.5 ||q||^2 - <q, k>] = 9 / 2
point_variance = max(num_point_qk, 1) * 9. / 2
point_weights = np.sqrt(1.0 / point_variance)
# This is equivalent to jax.nn.softplus, but avoids a bug in the test...
softplus = lambda x: jnp.logaddexp(x, jnp.zeros_like(x))
raw_point_weights = hk.get_parameter(
'trainable_point_weights',
shape=[num_head],
# softplus^{-1} (1)
init=hk.initializers.Constant(np.log(np.exp(1.) - 1.)))
# Trainable per-head weights for points.
trainable_point_weights = softplus(raw_point_weights)
point_weights *= trainable_point_weights
q_point = PointProjection([num_head, num_point_qk],
self.global_config,
name='q_point_projection')(inputs_1d,
rigid)
k_point = PointProjection([num_head, num_point_qk],
self.global_config,
name='k_point_projection')(inputs_1d,
rigid)
dist2 = geometry.square_euclidean_distance(
q_point[:, None, :, :], k_point[None, :, :, :], epsilon=0.)
attn_qk_point = -0.5 * jnp.sum(point_weights[:, None] * dist2, axis=-1)
attn_logits += attn_qk_point
num_scalar_qk = self.config.num_scalar_qk
# We assume that all queries and keys come iid from N(0, 1) distribution
# and compute the variances of the attention logits.
# Each scalar pair (q, k) contributes Var q*k = 1
scalar_variance = max(num_scalar_qk, 1) * 1.
scalar_weights = np.sqrt(1.0 / scalar_variance)
q_scalar = common_modules.Linear([num_head, num_scalar_qk],
use_bias=False,
name='q_scalar_projection')(
inputs_1d)
k_scalar = common_modules.Linear([num_head, num_scalar_qk],
use_bias=False,
name='k_scalar_projection')(
inputs_1d)
q_scalar *= scalar_weights
attn_logits += jnp.einsum('qhc,khc->qkh', q_scalar, k_scalar)
attention_2d = common_modules.Linear(
num_head, name='attention_2d')(inputs_2d)
attn_logits += attention_2d
mask_2d = mask * jnp.swapaxes(mask, -1, -2)
attn_logits -= 1e5 * (1. - mask_2d[..., None])
attn_logits *= np.sqrt(1. / 3) # Normalize by number of logit terms (3)
attn = jax.nn.softmax(attn_logits, axis=-2)
num_scalar_v = self.config.num_scalar_v
v_scalar = common_modules.Linear([num_head, num_scalar_v],
use_bias=False,
name='v_scalar_projection')(
inputs_1d)
# [num_query_residues, num_head, num_scalar_v]
result_scalar = jnp.einsum('qkh, khc->qhc', attn, v_scalar)
num_point_v = self.config.num_point_v
v_point = PointProjection([num_head, num_point_v],
self.global_config,
name='v_point_projection')(inputs_1d,
rigid)
result_point_global = jax.tree_map(
lambda x: jnp.sum(attn[..., None] * x, axis=-3), v_point[None])
# Features used in the linear output projection. Should have the size
# [num_query_residues, ?]
output_features = []
num_query_residues, _ = inputs_1d.shape
flat_shape = [num_query_residues, -1]
result_scalar = jnp.reshape(result_scalar, flat_shape)
output_features.append(result_scalar)
result_point_global = jax.tree_map(lambda r: jnp.reshape(r, flat_shape),
result_point_global)
result_point_local = rigid[..., None].apply_inverse_to_point(
result_point_global)
output_features.extend(
[result_point_local.x, result_point_local.y, result_point_local.z])
point_norms = result_point_local.norm(self._dist_epsilon)
output_features.append(point_norms)
# Dimensions: h = heads, i and j = residues,
# c = inputs_2d channels
# Contraction happens over the second residue dimension, similarly to how
# the usual attention is performed.
result_attention_over_2d = jnp.einsum('ijh, ijc->ihc', attn, inputs_2d)
output_features.append(jnp.reshape(result_attention_over_2d, flat_shape))
final_init = 'zeros' if self._zero_initialize_last else 'linear'
final_act = jnp.concatenate(output_features, axis=-1)
return common_modules.Linear(
self.config.num_channel,
initializer=final_init,
name='output_projection')(final_act)
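# A worked check of the normalization above (values from CONFIG_MULTIMER):
# with num_scalar_qk = 16 iid N(0, 1) pairs, Var[sum_c q_c k_c] = 16, hence
# the 1/sqrt(16) scalar weight; with num_point_qk = 4 point pairs each
# contributing variance 9/2, the point weight is sqrt(2 / (9 * 4)); the final
# sqrt(1/3) accounts for summing three logit terms (scalar, point, 2D bias),
# so the combined logits keep roughly unit variance.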
class FoldIteration(hk.Module):
"""A single iteration of iterative folding.
First, each residue attends to all residues using InvariantPointAttention.
Then, we apply transition layers to update the hidden representations.
Finally, we use the hidden representations to produce an update to the
affine of each residue.
"""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
name: str = 'fold_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(
self,
activations: Mapping[str, Any],
aatype: jnp.ndarray,
sequence_mask: jnp.ndarray,
update_rigid: bool,
is_training: bool,
initial_act: jnp.ndarray,
safe_key: Optional[prng.SafeKey] = None,
static_feat_2d: Optional[jnp.ndarray] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
c = self.config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
def safe_dropout_fn(tensor, safe_key):
return modules.apply_dropout(
tensor=tensor,
safe_key=safe_key,
rate=0.0 if self.global_config.deterministic else c.dropout,
is_training=is_training)
rigid = activations['rigid']
act = activations['act']
attention_module = InvariantPointAttention(
self.config, self.global_config)
# Attention
act += attention_module(
inputs_1d=act,
inputs_2d=static_feat_2d,
mask=sequence_mask,
rigid=rigid)
safe_key, *sub_keys = safe_key.split(3)
sub_keys = iter(sub_keys)
act = safe_dropout_fn(act, next(sub_keys))
act = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name='attention_layer_norm')(
act)
final_init = 'zeros' if self.global_config.zero_init else 'linear'
# Transition
input_act = act
for i in range(c.num_layer_in_transition):
init = 'relu' if i < c.num_layer_in_transition - 1 else final_init
act = common_modules.Linear(
c.num_channel,
initializer=init,
name='transition')(
act)
if i < c.num_layer_in_transition - 1:
act = jax.nn.relu(act)
act += input_act
act = safe_dropout_fn(act, next(sub_keys))
act = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name='transition_layer_norm')(act)
if update_rigid:
# Rigid update
rigid_update = QuatRigid(
self.global_config, init=final_init)(
act)
rigid = rigid @ rigid_update
sc = MultiRigidSidechain(c.sidechain, self.global_config)(
rigid.scale_translation(c.position_scale), [act, initial_act], aatype)
outputs = {'rigid': rigid, 'sc': sc}
rotation = jax.tree_map(jax.lax.stop_gradient, rigid.rotation)
rigid = geometry.Rigid3Array(rotation, rigid.translation)
new_activations = {
'act': act,
'rigid': rigid
}
return new_activations, outputs
def generate_monomer_rigids(representations: Mapping[str, jnp.ndarray],
batch: Mapping[str, jnp.ndarray],
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
is_training: bool,
safe_key: prng.SafeKey
) -> Dict[str, Any]:
"""Generate predicted Rigid's for a single chain.
This is the main part of the iterative fold head - it iteratively applies
folding to produce a set of predicted residue positions.
Args:
representations: Embeddings dictionary.
batch: Batch dictionary.
config: config for the iterative fold head.
global_config: global config.
is_training: is training.
safe_key: A prng.SafeKey object that wraps a PRNG key.
Returns:
    A dictionary containing residue Rigids and sidechain positions.
"""
c = config
sequence_mask = batch['seq_mask'][:, None]
act = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name='single_layer_norm')(
representations['single'])
initial_act = act
act = common_modules.Linear(
c.num_channel, name='initial_projection')(act)
# Sequence Mask has extra 1 at the end.
rigid = geometry.Rigid3Array.identity(sequence_mask.shape[:-1])
fold_iteration = FoldIteration(
c, global_config, name='fold_iteration')
assert len(batch['seq_mask'].shape) == 1
activations = {
'act':
act,
'rigid':
rigid
}
act_2d = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name='pair_layer_norm')(
representations['pair'])
safe_keys = safe_key.split(c.num_layer)
outputs = []
for key in safe_keys:
activations, output = fold_iteration(
activations,
initial_act=initial_act,
static_feat_2d=act_2d,
aatype=batch['aatype'],
safe_key=key,
sequence_mask=sequence_mask,
update_rigid=True,
is_training=is_training,
)
outputs.append(output)
output = jax.tree_multimap(lambda *x: jnp.stack(x), *outputs)
# Pass along for LDDT-Head.
output['act'] = activations['act']
return output
class StructureModule(hk.Module):
"""StructureModule as a network head.
Jumper et al. (2021) Suppl. Alg. 20 "StructureModule"
"""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
name: str = 'structure_module'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
representations: Mapping[str, jnp.ndarray],
batch: Mapping[str, Any],
is_training: bool,
safe_key: Optional[prng.SafeKey] = None,
compute_loss: bool = False
) -> Dict[str, Any]:
c = self.config
ret = {}
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
output = generate_monomer_rigids(
representations=representations,
batch=batch,
config=self.config,
global_config=self.global_config,
is_training=is_training,
safe_key=safe_key)
ret['traj'] = output['rigid'].scale_translation(c.position_scale).to_array()
ret['sidechains'] = output['sc']
ret['sidechains']['atom_pos'] = ret['sidechains']['atom_pos'].to_array()
ret['sidechains']['frames'] = ret['sidechains']['frames'].to_array()
if 'local_atom_pos' in ret['sidechains']:
ret['sidechains']['local_atom_pos'] = ret['sidechains'][
'local_atom_pos'].to_array()
ret['sidechains']['local_frames'] = ret['sidechains'][
'local_frames'].to_array()
aatype = batch['aatype']
seq_mask = batch['seq_mask']
atom14_pred_mask = all_atom_multimer.get_atom14_mask(
aatype) * seq_mask[:, None]
atom14_pred_positions = output['sc']['atom_pos'][-1]
ret['final_atom14_positions'] = atom14_pred_positions # (N, 14, 3)
ret['final_atom14_mask'] = atom14_pred_mask # (N, 14)
atom37_mask = all_atom_multimer.get_atom37_mask(aatype) * seq_mask[:, None]
atom37_pred_positions = all_atom_multimer.atom14_to_atom37(
atom14_pred_positions, aatype)
atom37_pred_positions *= atom37_mask[:, :, None]
ret['final_atom_positions'] = atom37_pred_positions # (N, 37, 3)
ret['final_atom_mask'] = atom37_mask # (N, 37)
ret['final_rigids'] = ret['traj'][-1]
ret['act'] = output['act']
if compute_loss:
return ret
else:
no_loss_features = ['final_atom_positions', 'final_atom_mask', 'act']
no_loss_ret = {k: ret[k] for k in no_loss_features}
return no_loss_ret
def loss(self,
value: Mapping[str, Any],
batch: Mapping[str, Any]
) -> Dict[str, Any]:
    raise NotImplementedError(
        'This function should be called on a batch with reordered chains (see '
        'Evans et al. (2021), Section 7.3, Multi-Chain Permutation Alignment).')
ret = {'loss': 0.}
ret['metrics'] = {}
aatype = batch['aatype']
all_atom_positions = batch['all_atom_positions']
all_atom_positions = geometry.Vec3Array.from_array(all_atom_positions)
all_atom_mask = batch['all_atom_mask']
seq_mask = batch['seq_mask']
residue_index = batch['residue_index']
gt_rigid, gt_affine_mask = make_backbone_affine(all_atom_positions,
all_atom_mask,
aatype)
chi_angles, chi_mask = all_atom_multimer.compute_chi_angles(
all_atom_positions, all_atom_mask, aatype)
pred_mask = all_atom_multimer.get_atom14_mask(aatype)
pred_mask *= seq_mask[:, None]
pred_positions = value['final_atom14_positions']
pred_positions = geometry.Vec3Array.from_array(pred_positions)
gt_positions, gt_mask, alt_naming_is_better = compute_atom14_gt(
aatype, all_atom_positions, all_atom_mask, pred_positions)
violations = find_structural_violations(
aatype=aatype,
residue_index=residue_index,
mask=pred_mask,
pred_positions=pred_positions,
config=self.config)
sidechains = value['sidechains']
gt_chi_angles = get_renamed_chi_angles(aatype, chi_angles,
alt_naming_is_better)
# Several violation metrics:
violation_metrics = compute_violation_metrics(
residue_index=residue_index,
mask=pred_mask,
seq_mask=seq_mask,
pred_positions=pred_positions,
violations=violations)
ret['metrics'].update(violation_metrics)
target_rigid = geometry.Rigid3Array.from_array(value['traj'])
gt_frames_mask = gt_affine_mask
# Split the loss into within-chain and between-chain components.
intra_chain_mask = batch['asym_id'][:, None] == batch['asym_id'][None, :]
intra_chain_bb_loss, intra_chain_fape = backbone_loss(
gt_rigid=gt_rigid,
gt_frames_mask=gt_frames_mask,
gt_positions_mask=gt_affine_mask,
target_rigid=target_rigid,
config=self.config.intra_chain_fape,
pair_mask=intra_chain_mask)
interface_bb_loss, interface_fape = backbone_loss(
gt_rigid=gt_rigid,
gt_frames_mask=gt_frames_mask,
gt_positions_mask=gt_affine_mask,
target_rigid=target_rigid,
config=self.config.interface_fape,
pair_mask=1. - intra_chain_mask)
bb_loss = intra_chain_bb_loss + interface_bb_loss
ret['fape'] = intra_chain_fape + interface_fape
ret['bb_loss'] = bb_loss
ret['loss'] += bb_loss
pred_frames = geometry.Rigid3Array.from_array(sidechains['frames'])
pred_positions = geometry.Vec3Array.from_array(sidechains['atom_pos'])
gt_sc_frames, gt_sc_frames_mask = compute_frames(
aatype=aatype,
all_atom_positions=all_atom_positions,
all_atom_mask=all_atom_mask,
use_alt=alt_naming_is_better)
sc_loss = sidechain_loss(
gt_frames=gt_sc_frames,
gt_frames_mask=gt_sc_frames_mask,
gt_positions=gt_positions,
gt_mask=gt_mask,
pred_frames=pred_frames,
pred_positions=pred_positions,
config=self.config)
ret['loss'] = ((1 - self.config.sidechain.weight_frac) * ret['loss'] +
self.config.sidechain.weight_frac * sc_loss['loss'])
ret['sidechain_fape'] = sc_loss['fape']
unnormed_angles = sidechains['unnormalized_angles_sin_cos']
pred_angles = sidechains['angles_sin_cos']
sup_chi_loss, ret['chi_loss'], ret[
'angle_norm_loss'] = supervised_chi_loss(
sequence_mask=seq_mask,
target_chi_mask=chi_mask,
target_chi_angles=gt_chi_angles,
aatype=aatype,
pred_angles=pred_angles,
unnormed_angles=unnormed_angles,
config=self.config)
ret['loss'] += sup_chi_loss
if self.config.structural_violation_loss_weight:
ret['loss'] += structural_violation_loss(
mask=pred_mask, violations=violations, config=self.config)
return ret
def compute_atom14_gt(
aatype: jnp.ndarray,
all_atom_positions: geometry.Vec3Array,
all_atom_mask: jnp.ndarray,
pred_pos: geometry.Vec3Array
) -> Tuple[geometry.Vec3Array, jnp.ndarray, jnp.ndarray]:
"""Find atom14 positions, this includes finding the correct renaming."""
gt_positions, gt_mask = all_atom_multimer.atom37_to_atom14(
aatype, all_atom_positions,
all_atom_mask)
alt_gt_positions, alt_gt_mask = all_atom_multimer.get_alt_atom14(
aatype, gt_positions, gt_mask)
atom_is_ambiguous = all_atom_multimer.get_atom14_is_ambiguous(aatype)
alt_naming_is_better = all_atom_multimer.find_optimal_renaming(
gt_positions=gt_positions,
alt_gt_positions=alt_gt_positions,
atom_is_ambiguous=atom_is_ambiguous,
gt_exists=gt_mask,
pred_positions=pred_pos)
use_alt = alt_naming_is_better[:, None]
gt_mask = (1. - use_alt) * gt_mask + use_alt * alt_gt_mask
gt_positions = (1. - use_alt) * gt_positions + use_alt * alt_gt_positions
  return gt_positions, gt_mask, alt_naming_is_better
def backbone_loss(gt_rigid: geometry.Rigid3Array,
gt_frames_mask: jnp.ndarray,
gt_positions_mask: jnp.ndarray,
target_rigid: geometry.Rigid3Array,
config: ml_collections.ConfigDict,
pair_mask: jnp.ndarray
) -> Tuple[Float, jnp.ndarray]:
"""Backbone FAPE Loss."""
loss_fn = functools.partial(
all_atom_multimer.frame_aligned_point_error,
l1_clamp_distance=config.atom_clamp_distance,
loss_unit_distance=config.loss_unit_distance)
loss_fn = jax.vmap(loss_fn, (0, None, None, 0, None, None, None))
fape = loss_fn(target_rigid, gt_rigid, gt_frames_mask,
target_rigid.translation, gt_rigid.translation,
gt_positions_mask, pair_mask)
return jnp.mean(fape), fape[-1]
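# A shape note (assumed): 'traj' stacks the per-iteration rigids, so
# target_rigid carries a leading layer axis; jax.vmap maps the FAPE over that
# axis while broadcasting the ground truth, jnp.mean averages the loss over
# layers, and fape[-1] reports the final layer.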
def compute_frames(
aatype: jnp.ndarray,
all_atom_positions: geometry.Vec3Array,
all_atom_mask: jnp.ndarray,
use_alt: jnp.ndarray
) -> Tuple[geometry.Rigid3Array, jnp.ndarray]:
"""Compute Frames from all atom positions.
  Args:
    aatype: array of aatypes, int of [N]
    all_atom_positions: Vector of all atom positions, shape [N, 37]
    all_atom_mask: mask, shape [N, 37]
    use_alt: whether to use alternative orientation for ambiguous aatypes,
      shape [N]

  Returns:
    Rigid corresponding to frames with shape [N, 8],
    mask of which Rigids are present with shape [N, 8]
"""
frames_batch = all_atom_multimer.atom37_to_frames(aatype, all_atom_positions,
all_atom_mask)
gt_frames = frames_batch['rigidgroups_gt_frames']
alt_gt_frames = frames_batch['rigidgroups_alt_gt_frames']
use_alt = use_alt[:, None]
renamed_gt_frames = jax.tree_multimap(
lambda x, y: (1. - use_alt) * x + use_alt * y, gt_frames, alt_gt_frames)
return renamed_gt_frames, frames_batch['rigidgroups_gt_exists']
def sidechain_loss(gt_frames: geometry.Rigid3Array,
gt_frames_mask: jnp.ndarray,
gt_positions: geometry.Vec3Array,
gt_mask: jnp.ndarray,
pred_frames: geometry.Rigid3Array,
pred_positions: geometry.Vec3Array,
config: ml_collections.ConfigDict
) -> Dict[str, jnp.ndarray]:
"""Sidechain Loss using cleaned up rigids."""
flat_gt_frames = jax.tree_map(jnp.ravel, gt_frames)
flat_frames_mask = jnp.ravel(gt_frames_mask)
flat_gt_positions = jax.tree_map(jnp.ravel, gt_positions)
flat_positions_mask = jnp.ravel(gt_mask)
# Compute frame_aligned_point_error score for the final layer.
def _slice_last_layer_and_flatten(x):
return jnp.ravel(x[-1])
flat_pred_frames = jax.tree_map(_slice_last_layer_and_flatten, pred_frames)
flat_pred_positions = jax.tree_map(_slice_last_layer_and_flatten,
pred_positions)
fape = all_atom_multimer.frame_aligned_point_error(
pred_frames=flat_pred_frames,
target_frames=flat_gt_frames,
frames_mask=flat_frames_mask,
pred_positions=flat_pred_positions,
target_positions=flat_gt_positions,
positions_mask=flat_positions_mask,
pair_mask=None,
length_scale=config.sidechain.loss_unit_distance,
l1_clamp_distance=config.sidechain.atom_clamp_distance)
return {
'fape': fape,
'loss': fape}
def structural_violation_loss(mask: jnp.ndarray,
violations: Mapping[str, Float],
config: ml_collections.ConfigDict
) -> Float:
"""Computes Loss for structural Violations."""
# Put all violation losses together to one large loss.
num_atoms = jnp.sum(mask).astype(jnp.float32) + 1e-6
between_residues = violations['between_residues']
within_residues = violations['within_residues']
return (config.structural_violation_loss_weight *
(between_residues['bonds_c_n_loss_mean'] +
between_residues['angles_ca_c_n_loss_mean'] +
between_residues['angles_c_n_ca_loss_mean'] +
jnp.sum(between_residues['clashes_per_atom_loss_sum'] +
within_residues['per_atom_loss_sum']) / num_atoms
))
def find_structural_violations(
aatype: jnp.ndarray,
residue_index: jnp.ndarray,
mask: jnp.ndarray,
pred_positions: geometry.Vec3Array, # (N, 14)
config: ml_collections.ConfigDict
) -> Dict[str, Any]:
"""Computes several checks for structural Violations."""
# Compute between residue backbone violations of bonds and angles.
connection_violations = all_atom_multimer.between_residue_bond_loss(
pred_atom_positions=pred_positions,
pred_atom_mask=mask.astype(jnp.float32),
residue_index=residue_index.astype(jnp.float32),
aatype=aatype,
tolerance_factor_soft=config.violation_tolerance_factor,
tolerance_factor_hard=config.violation_tolerance_factor)
# Compute the van der Waals radius for every atom
# (the first letter of the atom name is the element type).
# shape (N, 14)
atomtype_radius = jnp.array([
residue_constants.van_der_waals_radius[name[0]]
for name in residue_constants.atom_types
])
residx_atom14_to_atom37 = all_atom_multimer.get_atom14_to_atom37_map(aatype)
atom_radius = mask * utils.batched_gather(atomtype_radius,
residx_atom14_to_atom37)
# Compute the between residue clash loss.
between_residue_clashes = all_atom_multimer.between_residue_clash_loss(
pred_positions=pred_positions,
atom_exists=mask,
atom_radius=atom_radius,
residue_index=residue_index,
overlap_tolerance_soft=config.clash_overlap_tolerance,
overlap_tolerance_hard=config.clash_overlap_tolerance)
# Compute all within-residue violations (clashes,
# bond length and angle violations).
restype_atom14_bounds = residue_constants.make_atom14_dists_bounds(
overlap_tolerance=config.clash_overlap_tolerance,
bond_length_tolerance_factor=config.violation_tolerance_factor)
dists_lower_bound = utils.batched_gather(restype_atom14_bounds['lower_bound'],
aatype)
dists_upper_bound = utils.batched_gather(restype_atom14_bounds['upper_bound'],
aatype)
within_residue_violations = all_atom_multimer.within_residue_violations(
pred_positions=pred_positions,
atom_exists=mask,
dists_lower_bound=dists_lower_bound,
dists_upper_bound=dists_upper_bound,
tighten_bounds_for_loss=0.0)
# Combine them to a single per-residue violation mask (used later for LDDT).
per_residue_violations_mask = jnp.max(jnp.stack([
connection_violations['per_residue_violation_mask'],
jnp.max(between_residue_clashes['per_atom_clash_mask'], axis=-1),
jnp.max(within_residue_violations['per_atom_violations'],
axis=-1)]), axis=0)
return {
'between_residues': {
'bonds_c_n_loss_mean':
connection_violations['c_n_loss_mean'], # ()
'angles_ca_c_n_loss_mean':
connection_violations['ca_c_n_loss_mean'], # ()
'angles_c_n_ca_loss_mean':
connection_violations['c_n_ca_loss_mean'], # ()
'connections_per_residue_loss_sum':
connection_violations['per_residue_loss_sum'], # (N)
'connections_per_residue_violation_mask':
connection_violations['per_residue_violation_mask'], # (N)
'clashes_mean_loss':
between_residue_clashes['mean_loss'], # ()
'clashes_per_atom_loss_sum':
between_residue_clashes['per_atom_loss_sum'], # (N, 14)
'clashes_per_atom_clash_mask':
between_residue_clashes['per_atom_clash_mask'], # (N, 14)
},
'within_residues': {
'per_atom_loss_sum':
within_residue_violations['per_atom_loss_sum'], # (N, 14)
'per_atom_violations':
within_residue_violations['per_atom_violations'], # (N, 14),
},
'total_per_residue_violations_mask':
per_residue_violations_mask, # (N)
}
def compute_violation_metrics(
residue_index: jnp.ndarray,
mask: jnp.ndarray,
seq_mask: jnp.ndarray,
pred_positions: geometry.Vec3Array, # (N, 14)
violations: Mapping[str, jnp.ndarray],
) -> Dict[str, jnp.ndarray]:
"""Compute several metrics to assess the structural violations."""
ret = {}
between_residues = violations['between_residues']
within_residues = violations['within_residues']
extreme_ca_ca_violations = all_atom_multimer.extreme_ca_ca_distance_violations(
positions=pred_positions,
mask=mask.astype(jnp.float32),
residue_index=residue_index.astype(jnp.float32))
ret['violations_extreme_ca_ca_distance'] = extreme_ca_ca_violations
ret['violations_between_residue_bond'] = utils.mask_mean(
mask=seq_mask,
value=between_residues['connections_per_residue_violation_mask'])
ret['violations_between_residue_clash'] = utils.mask_mean(
mask=seq_mask,
value=jnp.max(between_residues['clashes_per_atom_clash_mask'], axis=-1))
ret['violations_within_residue'] = utils.mask_mean(
mask=seq_mask,
value=jnp.max(within_residues['per_atom_violations'], axis=-1))
ret['violations_per_residue'] = utils.mask_mean(
mask=seq_mask, value=violations['total_per_residue_violations_mask'])
return ret
def supervised_chi_loss(
sequence_mask: jnp.ndarray,
target_chi_mask: jnp.ndarray,
aatype: jnp.ndarray,
target_chi_angles: jnp.ndarray,
pred_angles: jnp.ndarray,
unnormed_angles: jnp.ndarray,
config: ml_collections.ConfigDict) -> Tuple[Float, Float, Float]:
"""Computes loss for direct chi angle supervision."""
eps = 1e-6
chi_mask = target_chi_mask.astype(jnp.float32)
pred_angles = pred_angles[:, :, 3:]
residue_type_one_hot = jax.nn.one_hot(
aatype, residue_constants.restype_num + 1, dtype=jnp.float32)[None]
chi_pi_periodic = jnp.einsum('ijk, kl->ijl', residue_type_one_hot,
jnp.asarray(residue_constants.chi_pi_periodic))
true_chi = target_chi_angles[None]
sin_true_chi = jnp.sin(true_chi)
cos_true_chi = jnp.cos(true_chi)
sin_cos_true_chi = jnp.stack([sin_true_chi, cos_true_chi], axis=-1)
# This is -1 if chi is pi periodic and +1 if it's 2 pi periodic
shifted_mask = (1 - 2 * chi_pi_periodic)[..., None]
sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi
sq_chi_error = jnp.sum(
squared_difference(sin_cos_true_chi, pred_angles), -1)
sq_chi_error_shifted = jnp.sum(
squared_difference(sin_cos_true_chi_shifted, pred_angles), -1)
sq_chi_error = jnp.minimum(sq_chi_error, sq_chi_error_shifted)
sq_chi_loss = utils.mask_mean(mask=chi_mask[None], value=sq_chi_error)
angle_norm = jnp.sqrt(jnp.sum(jnp.square(unnormed_angles), axis=-1) + eps)
norm_error = jnp.abs(angle_norm - 1.)
angle_norm_loss = utils.mask_mean(mask=sequence_mask[None, :, None],
value=norm_error)
loss = (config.chi_weight * sq_chi_loss
+ config.angle_norm_weight * angle_norm_loss)
return loss, sq_chi_loss, angle_norm_loss
def l2_normalize(x: jnp.ndarray,
axis: int = -1,
epsilon: float = 1e-12
) -> jnp.ndarray:
return x / jnp.sqrt(
jnp.maximum(jnp.sum(x**2, axis=axis, keepdims=True), epsilon))
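# A minimal check (illustrative): l2_normalize(jnp.array([3., 4.])) returns
# [0.6, 0.8]; the epsilon floor keeps the result finite for all-zero input.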
def get_renamed_chi_angles(aatype: jnp.ndarray,
chi_angles: jnp.ndarray,
alt_is_better: jnp.ndarray
) -> jnp.ndarray:
"""Return renamed chi angles."""
chi_angle_is_ambiguous = utils.batched_gather(
jnp.array(residue_constants.chi_pi_periodic, dtype=jnp.float32), aatype)
alt_chi_angles = chi_angles + np.pi * chi_angle_is_ambiguous
# Map back to [-pi, pi].
alt_chi_angles = alt_chi_angles - 2 * np.pi * (alt_chi_angles > np.pi).astype(
jnp.float32)
alt_is_better = alt_is_better[:, None]
return (1. - alt_is_better) * chi_angles + alt_is_better * alt_chi_angles
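# A worked example (assumed values): for a pi-periodic chi angle of 2.0 rad,
# the alternative naming gives 2.0 + pi ~= 5.14 rad, which is wrapped back to
# 5.14 - 2*pi ~= -1.14 rad: the same physical angle under the swapped atom
# naming.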
class MultiRigidSidechain(hk.Module):
"""Class to make side chain atoms."""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
name: str = 'rigid_sidechain'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
rigid: geometry.Rigid3Array,
representations_list: Iterable[jnp.ndarray],
aatype: jnp.ndarray
) -> Dict[str, Any]:
"""Predict sidechains using multi-rigid representations.
Args:
      rigid: The Rigids for each residue (translations in angstroms).
      representations_list: A list of activations to predict sidechains from.
      aatype: amino acid types.

    Returns:
      Dict containing atom positions and frames (in angstroms).
"""
act = [
common_modules.Linear( # pylint: disable=g-complex-comprehension
self.config.num_channel,
name='input_projection')(jax.nn.relu(x))
for x in representations_list]
# Sum the activation list (equivalent to concat then Conv1D)
act = sum(act)
final_init = 'zeros' if self.global_config.zero_init else 'linear'
# Mapping with some residual blocks.
for _ in range(self.config.num_residual_block):
old_act = act
act = common_modules.Linear(
self.config.num_channel,
initializer='relu',
name='resblock1')(
jax.nn.relu(act))
act = common_modules.Linear(
self.config.num_channel,
initializer=final_init,
name='resblock2')(
jax.nn.relu(act))
act += old_act
    # Map activations to torsion angles.
    # Shape: (num_res, 14).
num_res = act.shape[0]
unnormalized_angles = common_modules.Linear(
14, name='unnormalized_angles')(
jax.nn.relu(act))
unnormalized_angles = jnp.reshape(
unnormalized_angles, [num_res, 7, 2])
angles = l2_normalize(unnormalized_angles, axis=-1)
outputs = {
'angles_sin_cos': angles, # jnp.ndarray (N, 7, 2)
'unnormalized_angles_sin_cos':
unnormalized_angles, # jnp.ndarray (N, 7, 2)
}
# Map torsion angles to frames.
# geometry.Rigid3Array with shape (N, 8)
all_frames_to_global = all_atom_multimer.torsion_angles_to_frames(
aatype,
rigid,
angles)
# Use frames and literature positions to create the final atom coordinates.
# geometry.Vec3Array with shape (N, 14)
pred_positions = all_atom_multimer.frames_and_literature_positions_to_atom14_pos(
aatype, all_frames_to_global)
outputs.update({
'atom_pos': pred_positions, # geometry.Vec3Array (N, 14)
'frames': all_frames_to_global, # geometry.Rigid3Array (N, 8)
})
return outputs
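# A shape sketch (assumed): for N residues the head maps activations to 14
# numbers, reshaped to 7 (sin, cos) torsion pairs of shape (N, 7, 2), lifts
# them to 8 frames per residue (N, 8), and places the 14 canonical atoms
# (N, 14) via the literature geometry.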
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry Module."""
from alphafold.model.geometry import rigid_matrix_vector
from alphafold.model.geometry import rotation_matrix
from alphafold.model.geometry import struct_of_array
from alphafold.model.geometry import vector
Rot3Array = rotation_matrix.Rot3Array
Rigid3Array = rigid_matrix_vector.Rigid3Array
StructOfArray = struct_of_array.StructOfArray
Vec3Array = vector.Vec3Array
square_euclidean_distance = vector.square_euclidean_distance
euclidean_distance = vector.euclidean_distance
dihedral_angle = vector.dihedral_angle
dot = vector.dot
cross = vector.cross
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rigid3Array Transformations represented by a Matrix and a Vector."""
from __future__ import annotations
from typing import Union
from alphafold.model.geometry import rotation_matrix
from alphafold.model.geometry import struct_of_array
from alphafold.model.geometry import vector
import jax
import jax.numpy as jnp
Float = Union[float, jnp.ndarray]
VERSION = '0.1'
@struct_of_array.StructOfArray(same_dtype=True)
class Rigid3Array:
"""Rigid Transformation, i.e. element of special euclidean group."""
rotation: rotation_matrix.Rot3Array
translation: vector.Vec3Array
def __matmul__(self, other: Rigid3Array) -> Rigid3Array:
new_rotation = self.rotation @ other.rotation
new_translation = self.apply_to_point(other.translation)
return Rigid3Array(new_rotation, new_translation)
def inverse(self) -> Rigid3Array:
"""Return Rigid3Array corresponding to inverse transform."""
inv_rotation = self.rotation.inverse()
inv_translation = inv_rotation.apply_to_point(-self.translation)
return Rigid3Array(inv_rotation, inv_translation)
def apply_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array:
"""Apply Rigid3Array transform to point."""
return self.rotation.apply_to_point(point) + self.translation
def apply_inverse_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array:
"""Apply inverse Rigid3Array transform to point."""
new_point = point - self.translation
return self.rotation.apply_inverse_to_point(new_point)
def compose_rotation(self, other_rotation):
rot = self.rotation @ other_rotation
trans = jax.tree_map(lambda x: jnp.broadcast_to(x, rot.shape),
self.translation)
return Rigid3Array(rot, trans)
@classmethod
def identity(cls, shape, dtype=jnp.float32) -> Rigid3Array:
"""Return identity Rigid3Array of given shape."""
return cls(
rotation_matrix.Rot3Array.identity(shape, dtype=dtype),
vector.Vec3Array.zeros(shape, dtype=dtype))
def scale_translation(self, factor: Float) -> Rigid3Array:
"""Scale translation in Rigid3Array by 'factor'."""
return Rigid3Array(self.rotation, self.translation * factor)
def to_array(self):
rot_array = self.rotation.to_array()
vec_array = self.translation.to_array()
return jnp.concatenate([rot_array, vec_array[..., None]], axis=-1)
@classmethod
def from_array(cls, array):
rot = rotation_matrix.Rot3Array.from_array(array[..., :3])
vec = vector.Vec3Array.from_array(array[..., -1])
return cls(rot, vec)
@classmethod
def from_array4x4(cls, array: jnp.ndarray) -> Rigid3Array:
"""Construct Rigid3Array from homogeneous 4x4 array."""
assert array.shape[-1] == 4
assert array.shape[-2] == 4
rotation = rotation_matrix.Rot3Array(
array[..., 0, 0], array[..., 0, 1], array[..., 0, 2],
array[..., 1, 0], array[..., 1, 1], array[..., 1, 2],
array[..., 2, 0], array[..., 2, 1], array[..., 2, 2]
)
translation = vector.Vec3Array(
array[..., 0, 3], array[..., 1, 3], array[..., 2, 3])
return cls(rotation, translation)
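  # A minimal round-trip sketch (illustrative): the homogeneous identity
  # matrix maps to the identity transform.
  #   rigid = Rigid3Array.from_array4x4(jnp.eye(4))
  #   # rigid.rotation is the identity Rot3Array; rigid.translation is zero.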
def __getstate__(self):
return (VERSION, (self.rotation, self.translation))
def __setstate__(self, state):
version, (rot, trans) = state
del version
object.__setattr__(self, 'rotation', rot)
object.__setattr__(self, 'translation', trans)
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rot3Array Matrix Class."""
from __future__ import annotations
import dataclasses
from alphafold.model.geometry import struct_of_array
from alphafold.model.geometry import utils
from alphafold.model.geometry import vector
import jax
import jax.numpy as jnp
import numpy as np
COMPONENTS = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz']
VERSION = '0.1'
@struct_of_array.StructOfArray(same_dtype=True)
class Rot3Array:
"""Rot3Array Matrix in 3 dimensional Space implemented as struct of arrays."""
xx: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32})
xy: jnp.ndarray
xz: jnp.ndarray
yx: jnp.ndarray
yy: jnp.ndarray
yz: jnp.ndarray
zx: jnp.ndarray
zy: jnp.ndarray
zz: jnp.ndarray
__array_ufunc__ = None
def inverse(self) -> Rot3Array:
"""Returns inverse of Rot3Array."""
return Rot3Array(self.xx, self.yx, self.zx,
self.xy, self.yy, self.zy,
self.xz, self.yz, self.zz)
def apply_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array:
"""Applies Rot3Array to point."""
return vector.Vec3Array(
self.xx * point.x + self.xy * point.y + self.xz * point.z,
self.yx * point.x + self.yy * point.y + self.yz * point.z,
self.zx * point.x + self.zy * point.y + self.zz * point.z)
def apply_inverse_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array:
"""Applies inverse Rot3Array to point."""
return self.inverse().apply_to_point(point)
def __matmul__(self, other: Rot3Array) -> Rot3Array:
"""Composes two Rot3Arrays."""
c0 = self.apply_to_point(vector.Vec3Array(other.xx, other.yx, other.zx))
c1 = self.apply_to_point(vector.Vec3Array(other.xy, other.yy, other.zy))
c2 = self.apply_to_point(vector.Vec3Array(other.xz, other.yz, other.zz))
return Rot3Array(c0.x, c1.x, c2.x, c0.y, c1.y, c2.y, c0.z, c1.z, c2.z)
@classmethod
def identity(cls, shape, dtype=jnp.float32) -> Rot3Array:
"""Returns identity of given shape."""
ones = jnp.ones(shape, dtype=dtype)
zeros = jnp.zeros(shape, dtype=dtype)
return cls(ones, zeros, zeros, zeros, ones, zeros, zeros, zeros, ones)
@classmethod
def from_two_vectors(cls, e0: vector.Vec3Array,
e1: vector.Vec3Array) -> Rot3Array:
"""Construct Rot3Array from two Vectors.
    Rot3Array is constructed such that in the corresponding frame 'e0' lies on
    the positive x-axis and 'e1' lies in the xy-plane with positive y.
Args:
e0: Vector
e1: Vector
Returns:
Rot3Array
"""
# Normalize the unit vector for the x-axis, e0.
e0 = e0.normalized()
# make e1 perpendicular to e0.
c = e1.dot(e0)
e1 = (e1 - c * e0).normalized()
# Compute e2 as cross product of e0 and e1.
e2 = e0.cross(e1)
return cls(e0.x, e1.x, e2.x, e0.y, e1.y, e2.y, e0.z, e1.z, e2.z)
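  # A worked example (assumed inputs): e0 = (1, 0, 0) and e1 = (0, 1, 0) are
  # already orthonormal, so c = 0, e2 = e0 x e1 = (0, 0, 1), and the result
  # is the identity rotation.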
@classmethod
def from_array(cls, array: jnp.ndarray) -> Rot3Array:
"""Construct Rot3Array Matrix from array of shape. [..., 3, 3]."""
unstacked = utils.unstack(array, axis=-2)
unstacked = sum([utils.unstack(x, axis=-1) for x in unstacked], [])
return cls(*unstacked)
def to_array(self) -> jnp.ndarray:
"""Convert Rot3Array to array of shape [..., 3, 3]."""
return jnp.stack(
[jnp.stack([self.xx, self.xy, self.xz], axis=-1),
jnp.stack([self.yx, self.yy, self.yz], axis=-1),
jnp.stack([self.zx, self.zy, self.zz], axis=-1)],
axis=-2)
@classmethod
def from_quaternion(cls,
w: jnp.ndarray,
x: jnp.ndarray,
y: jnp.ndarray,
z: jnp.ndarray,
normalize: bool = True,
epsilon: float = 1e-6) -> Rot3Array:
"""Construct Rot3Array from components of quaternion."""
if normalize:
inv_norm = jax.lax.rsqrt(jnp.maximum(epsilon, w**2 + x**2 + y**2 + z**2))
w *= inv_norm
x *= inv_norm
y *= inv_norm
z *= inv_norm
xx = 1 - 2 * (jnp.square(y) + jnp.square(z))
xy = 2 * (x * y - w * z)
xz = 2 * (x * z + w * y)
yx = 2 * (x * y + w * z)
yy = 1 - 2 * (jnp.square(x) + jnp.square(z))
yz = 2 * (y * z - w * x)
zx = 2 * (x * z - w * y)
zy = 2 * (y * z + w * x)
zz = 1 - 2 * (jnp.square(x) + jnp.square(y))
return cls(xx, xy, xz, yx, yy, yz, zx, zy, zz)
@classmethod
def random_uniform(cls, key, shape, dtype=jnp.float32) -> Rot3Array:
"""Samples uniform random Rot3Array according to Haar Measure."""
quat_array = jax.random.normal(key, tuple(shape) + (4,), dtype=dtype)
quats = utils.unstack(quat_array)
return cls.from_quaternion(*quats)
def __getstate__(self):
return (VERSION,
[np.asarray(getattr(self, field)) for field in COMPONENTS])
def __setstate__(self, state):
version, state = state
del version
for i, field in enumerate(COMPONENTS):
object.__setattr__(self, field, state[i])
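# Editor's sketch (not part of the original module): a minimal usage example
# of the frame construction above. The input vectors and tolerances are
# illustrative assumptions, not values used anywhere in AlphaFold.
if __name__ == '__main__':
  e0 = vector.Vec3Array(jnp.array(1.), jnp.array(1.), jnp.array(0.))
  e1 = vector.Vec3Array(jnp.array(0.), jnp.array(1.), jnp.array(0.))
  rot = Rot3Array.from_two_vectors(e0, e1)
  # In the constructed frame, e0 lies on the positive x-axis.
  local_e0 = rot.apply_inverse_to_point(e0.normalized())
  np.testing.assert_allclose(local_e0.to_array(), [1., 0., 0.], atol=1e-5)
  # A rotation composed with its inverse (its transpose) is the identity.
  composed = rot @ rot.inverse()
  np.testing.assert_allclose(
      composed.to_array(), Rot3Array.identity(()).to_array(), atol=1e-5)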
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class decorator to represent (nested) struct of arrays."""
import dataclasses
import jax
def get_item(instance, key):
sliced = {}
for field in get_array_fields(instance):
num_trailing_dims = field.metadata.get('num_trailing_dims', 0)
this_key = key
if isinstance(key, tuple) and Ellipsis in this_key:
this_key += (slice(None),) * num_trailing_dims
sliced[field.name] = getattr(instance, field.name)[this_key]
return dataclasses.replace(instance, **sliced)
@property
def get_shape(instance):
"""Returns Shape for given instance of dataclass."""
first_field = dataclasses.fields(instance)[0]
num_trailing_dims = first_field.metadata.get('num_trailing_dims', None)
value = getattr(instance, first_field.name)
if num_trailing_dims:
return value.shape[:-num_trailing_dims]
else:
return value.shape
def get_len(instance):
"""Returns length for given instance of dataclass."""
shape = instance.shape
if shape:
return shape[0]
else:
raise TypeError('len() of unsized object') # Match jax.numpy behavior.
@property
def get_dtype(instance):
"""Returns Dtype for given instance of dataclass."""
fields = dataclasses.fields(instance)
sets_dtype = [
field.name for field in fields if field.metadata.get('sets_dtype', False)
]
if sets_dtype:
assert len(sets_dtype) == 1, 'at most one field can set dtype'
field_value = getattr(instance, sets_dtype[0])
elif instance.same_dtype:
field_value = getattr(instance, fields[0].name)
else:
# Should this be a ValueError?
raise AttributeError('Trying to access dtype on a struct of arrays without '
                     'either "same_dtype" or a field setting the dtype')
if hasattr(field_value, 'dtype'):
return field_value.dtype
else:
# Should this be a ValueError?
raise AttributeError(f'field_value {field_value} does not have a dtype')
def replace(instance, **kwargs):
return dataclasses.replace(instance, **kwargs)
def post_init(instance):
"""Validate instance has same shapes & dtypes."""
array_fields = get_array_fields(instance)
arrays = list(get_array_fields(instance, return_values=True).values())
first_field = array_fields[0]
# The slightly convoluted way of checking whether the leaves are actual
# arrays is needed because e.g. vmap internally relies on being able to
# construct pytrees with object() as leaves, which would break the checks.
# We therefore only validate the object when the entries in the dataclass
# are arrays or other dataclasses of arrays.
try:
dtype = instance.dtype
except AttributeError:
dtype = None
if dtype is not None:
first_shape = instance.shape
for array, field in zip(arrays, array_fields):
num_trailing_dims = field.metadata.get('num_trailing_dims', None)
if num_trailing_dims:
  array_shape = array.shape
  field_shape = array_shape[:-num_trailing_dims]
  msg = (f'field {field} should have number of trailing dims '
         f'{num_trailing_dims}')
  assert len(array_shape) == len(first_shape) + num_trailing_dims, msg
else:
  field_shape = array.shape
shape_msg = (f"Stripped Shape {field_shape} of field {field} doesn't "
f"match shape {first_shape} of field {first_field}")
assert field_shape == first_shape, shape_msg
field_dtype = array.dtype
allowed_metadata_dtypes = field.metadata.get('allowed_dtypes', [])
if allowed_metadata_dtypes:
msg = f'Dtype is {field_dtype} but must be in {allowed_metadata_dtypes}'
assert field_dtype in allowed_metadata_dtypes, msg
if 'dtype' in field.metadata:
target_dtype = field.metadata['dtype']
else:
target_dtype = dtype
msg = f'Dtype is {field_dtype} but must be {target_dtype}'
assert field_dtype == target_dtype, msg
def flatten(instance):
"""Flatten Struct of Array instance."""
array_likes = list(get_array_fields(instance, return_values=True).values())
flat_array_likes = []
inner_treedefs = []
num_arrays = []
for array_like in array_likes:
flat_array_like, inner_treedef = jax.tree_flatten(array_like)
inner_treedefs.append(inner_treedef)
flat_array_likes += flat_array_like
num_arrays.append(len(flat_array_like))
metadata = get_metadata_fields(instance, return_values=True)
metadata = type(instance).metadata_cls(**metadata)
return flat_array_likes, (inner_treedefs, metadata, num_arrays)
def make_metadata_class(cls):
metadata_fields = get_fields(cls,
lambda x: x.metadata.get('is_metadata', False))
metadata_cls = dataclasses.make_dataclass(
cls_name='Meta' + cls.__name__,
fields=[(field.name, field.type, field) for field in metadata_fields],
frozen=True,
eq=True)
return metadata_cls
def get_fields(cls_or_instance, filterfn, return_values=False):
fields = dataclasses.fields(cls_or_instance)
fields = [field for field in fields if filterfn(field)]
if return_values:
return {
field.name: getattr(cls_or_instance, field.name) for field in fields
}
else:
return fields
def get_array_fields(cls, return_values=False):
return get_fields(
cls,
lambda x: not x.metadata.get('is_metadata', False),
return_values=return_values)
def get_metadata_fields(cls, return_values=False):
return get_fields(
cls,
lambda x: x.metadata.get('is_metadata', False),
return_values=return_values)
class StructOfArray:
"""Class Decorator for Struct Of Arrays."""
def __init__(self, same_dtype=True):
self.same_dtype = same_dtype
def __call__(self, cls):
cls.__array_ufunc__ = None
cls.replace = replace
cls.same_dtype = self.same_dtype
cls.dtype = get_dtype
cls.shape = get_shape
cls.__len__ = get_len
cls.__getitem__ = get_item
cls.__post_init__ = post_init
new_cls = dataclasses.dataclass(cls, frozen=True, eq=False) # pytype: disable=wrong-keyword-args
# Pytree aux data is required to be hashable, so we create a derived
# dataclass that just holds the metadata.
new_cls.metadata_cls = make_metadata_class(new_cls)
def unflatten(aux, data):
inner_treedefs, metadata, num_arrays = aux
array_fields = [field.name for field in get_array_fields(new_cls)]
value_dict = {}
array_start = 0
for num_array, inner_treedef, array_field in zip(num_arrays,
inner_treedefs,
array_fields):
value_dict[array_field] = jax.tree_unflatten(
inner_treedef, data[array_start:array_start + num_array])
array_start += num_array
metadata_fields = get_metadata_fields(new_cls)
for field in metadata_fields:
value_dict[field.name] = getattr(metadata, field.name)
return new_cls(**value_dict)
jax.tree_util.register_pytree_node(
nodetype=new_cls, flatten_func=flatten, unflatten_func=unflatten)
return new_cls
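# Editor's sketch (not part of the original module): a minimal example of the
# decorator in action. The Point2 class and the demo values are illustrative
# assumptions only.
if __name__ == '__main__':
  import jax.numpy as jnp

  @StructOfArray(same_dtype=True)
  class Point2:
    x: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32})
    y: jnp.ndarray

  p = Point2(jnp.zeros(4), jnp.ones(4))
  # The decorated class behaves like an array: it has a shape, supports
  # indexing, and is registered as a JAX pytree.
  assert p.shape == (4,) and p[1:].shape == (3,)
  doubled = jax.tree_map(lambda a: 2. * a, p)
  assert float(doubled.y[0]) == 2.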
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utils for tests."""
import dataclasses
from alphafold.model.geometry import rigid_matrix_vector
from alphafold.model.geometry import rotation_matrix
from alphafold.model.geometry import vector
import jax.numpy as jnp
import numpy as np
def assert_rotation_matrix_equal(matrix1: rotation_matrix.Rot3Array,
matrix2: rotation_matrix.Rot3Array):
for field in dataclasses.fields(rotation_matrix.Rot3Array):
  name = field.name
  np.testing.assert_array_equal(
      getattr(matrix1, name), getattr(matrix2, name))
def assert_rotation_matrix_close(mat1: rotation_matrix.Rot3Array,
mat2: rotation_matrix.Rot3Array):
np.testing.assert_array_almost_equal(mat1.to_array(), mat2.to_array(), 6)
def assert_array_equal_to_rotation_matrix(array: jnp.ndarray,
matrix: rotation_matrix.Rot3Array):
"""Check that array and Matrix match."""
np.testing.assert_array_equal(matrix.xx, array[..., 0, 0])
np.testing.assert_array_equal(matrix.xy, array[..., 0, 1])
np.testing.assert_array_equal(matrix.xz, array[..., 0, 2])
np.testing.assert_array_equal(matrix.yx, array[..., 1, 0])
np.testing.assert_array_equal(matrix.yy, array[..., 1, 1])
np.testing.assert_array_equal(matrix.yz, array[..., 1, 2])
np.testing.assert_array_equal(matrix.zx, array[..., 2, 0])
np.testing.assert_array_equal(matrix.zy, array[..., 2, 1])
np.testing.assert_array_equal(matrix.zz, array[..., 2, 2])
def assert_array_close_to_rotation_matrix(array: jnp.ndarray,
matrix: rotation_matrix.Rot3Array):
np.testing.assert_array_almost_equal(matrix.to_array(), array, 6)
def assert_vectors_equal(vec1: vector.Vec3Array, vec2: vector.Vec3Array):
np.testing.assert_array_equal(vec1.x, vec2.x)
np.testing.assert_array_equal(vec1.y, vec2.y)
np.testing.assert_array_equal(vec1.z, vec2.z)
def assert_vectors_close(vec1: vector.Vec3Array, vec2: vector.Vec3Array):
np.testing.assert_allclose(vec1.x, vec2.x, atol=1e-6, rtol=0.)
np.testing.assert_allclose(vec1.y, vec2.y, atol=1e-6, rtol=0.)
np.testing.assert_allclose(vec1.z, vec2.z, atol=1e-6, rtol=0.)
def assert_array_close_to_vector(array: jnp.ndarray, vec: vector.Vec3Array):
np.testing.assert_allclose(vec.to_array(), array, atol=1e-6, rtol=0.)
def assert_array_equal_to_vector(array: jnp.ndarray, vec: vector.Vec3Array):
np.testing.assert_array_equal(vec.to_array(), array)
def assert_rigid_equal_to_rigid(rigid1: rigid_matrix_vector.Rigid3Array,
rigid2: rigid_matrix_vector.Rigid3Array):
assert_rot_trans_equal_to_rigid(rigid1.rotation, rigid1.translation, rigid2)
def assert_rigid_close_to_rigid(rigid1: rigid_matrix_vector.Rigid3Array,
rigid2: rigid_matrix_vector.Rigid3Array):
assert_rot_trans_close_to_rigid(rigid1.rotation, rigid1.translation, rigid2)
def assert_rot_trans_equal_to_rigid(rot: rotation_matrix.Rot3Array,
trans: vector.Vec3Array,
rigid: rigid_matrix_vector.Rigid3Array):
assert_rotation_matrix_equal(rot, rigid.rotation)
assert_vectors_equal(trans, rigid.translation)
def assert_rot_trans_close_to_rigid(rot: rotation_matrix.Rot3Array,
trans: vector.Vec3Array,
rigid: rigid_matrix_vector.Rigid3Array):
assert_rotation_matrix_close(rot, rigid.rotation)
assert_vectors_close(trans, rigid.translation)
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for geometry library."""
from typing import List
import jax.numpy as jnp
def unstack(value: jnp.ndarray, axis: int = -1) -> List[jnp.ndarray]:
return [jnp.squeeze(v, axis=axis)
for v in jnp.split(value, value.shape[axis], axis=axis)]
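# Editor's sketch (not part of the original module): unstack is the inverse
# of jnp.stack along the given axis; the demo array is an illustrative
# assumption.
if __name__ == '__main__':
  stacked = jnp.arange(6.).reshape(2, 3)
  parts = unstack(stacked, axis=-1)
  assert len(parts) == 3 and all(p.shape == (2,) for p in parts)
  assert (jnp.stack(parts, axis=-1) == stacked).all()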
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vec3Array Class."""
from __future__ import annotations
import dataclasses
from typing import Union
from alphafold.model.geometry import struct_of_array
from alphafold.model.geometry import utils
import jax
import jax.numpy as jnp
import numpy as np
Float = Union[float, jnp.ndarray]
VERSION = '0.1'
@struct_of_array.StructOfArray(same_dtype=True)
class Vec3Array:
"""Vec3Array in 3 dimensional Space implemented as struct of arrays.
This is done in order to improve performance and precision.
On TPU small matrix multiplications are very suboptimal and will waste large
compute ressources, furthermore any matrix multiplication on tpu happen in
mixed bfloat16/float32 precision, which is often undesirable when handling
physical coordinates.
In most cases this will also be faster on cpu's/gpu's since it allows for
easier use of vector instructions.
"""
x: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32})
y: jnp.ndarray
z: jnp.ndarray
def __post_init__(self):
if hasattr(self.x, 'dtype'):
assert self.x.dtype == self.y.dtype
assert self.x.dtype == self.z.dtype
assert all([x == y for x, y in zip(self.x.shape, self.y.shape)])
assert all([x == z for x, z in zip(self.x.shape, self.z.shape)])
def __add__(self, other: Vec3Array) -> Vec3Array:
return jax.tree_multimap(lambda x, y: x + y, self, other)
def __sub__(self, other: Vec3Array) -> Vec3Array:
return jax.tree_multimap(lambda x, y: x - y, self, other)
def __mul__(self, other: Float) -> Vec3Array:
return jax.tree_map(lambda x: x * other, self)
def __rmul__(self, other: Float) -> Vec3Array:
return self * other
def __truediv__(self, other: Float) -> Vec3Array:
return jax.tree_map(lambda x: x / other, self)
def __neg__(self) -> Vec3Array:
return jax.tree_map(lambda x: -x, self)
def __pos__(self) -> Vec3Array:
return jax.tree_map(lambda x: x, self)
def cross(self, other: Vec3Array) -> Vec3Array:
"""Compute cross product between 'self' and 'other'."""
new_x = self.y * other.z - self.z * other.y
new_y = self.z * other.x - self.x * other.z
new_z = self.x * other.y - self.y * other.x
return Vec3Array(new_x, new_y, new_z)
def dot(self, other: Vec3Array) -> Float:
"""Compute dot product between 'self' and 'other'."""
return self.x * other.x + self.y * other.y + self.z * other.z
def norm(self, epsilon: float = 1e-6) -> Float:
"""Compute Norm of Vec3Array, clipped to epsilon."""
# To avoid NaN on the backward pass, we must use maximum before the sqrt
norm2 = self.dot(self)
if epsilon:
norm2 = jnp.maximum(norm2, epsilon**2)
return jnp.sqrt(norm2)
def norm2(self):
return self.dot(self)
def normalized(self, epsilon: float = 1e-6) -> Vec3Array:
"""Return unit vector with optional clipping."""
return self / self.norm(epsilon)
@classmethod
def zeros(cls, shape, dtype=jnp.float32):
"""Return Vec3Array corresponding to zeros of given shape."""
return cls(
jnp.zeros(shape, dtype), jnp.zeros(shape, dtype),
jnp.zeros(shape, dtype))
def to_array(self) -> jnp.ndarray:
return jnp.stack([self.x, self.y, self.z], axis=-1)
@classmethod
def from_array(cls, array):
return cls(*utils.unstack(array))
def __getstate__(self):
return (VERSION,
[np.asarray(self.x),
np.asarray(self.y),
np.asarray(self.z)])
def __setstate__(self, state):
version, state = state
del version
for i, letter in enumerate('xyz'):
object.__setattr__(self, letter, state[i])
def square_euclidean_distance(vec1: Vec3Array,
vec2: Vec3Array,
epsilon: float = 1e-6) -> Float:
"""Computes square of euclidean distance between 'vec1' and 'vec2'.
Args:
vec1: Vec3Array to compute distance to
vec2: Vec3Array to compute distance from, should be
broadcast compatible with 'vec1'
epsilon: the squared distance is clipped from below to be at least epsilon
Returns:
Array of square euclidean distances;
shape will be result of broadcasting 'vec1' and 'vec2'
"""
difference = vec1 - vec2
distance = difference.dot(difference)
if epsilon:
distance = jnp.maximum(distance, epsilon)
return distance
def dot(vector1: Vec3Array, vector2: Vec3Array) -> Float:
return vector1.dot(vector2)
def cross(vector1: Vec3Array, vector2: Vec3Array) -> Vec3Array:
return vector1.cross(vector2)
def norm(vector: Vec3Array, epsilon: float = 1e-6) -> Float:
return vector.norm(epsilon)
def normalized(vector: Vec3Array, epsilon: float = 1e-6) -> Vec3Array:
return vector.normalized(epsilon)
def euclidean_distance(vec1: Vec3Array,
vec2: Vec3Array,
epsilon: float = 1e-6) -> Float:
"""Computes euclidean distance between 'vec1' and 'vec2'.
Args:
vec1: Vec3Array to compute euclidean distance to
vec2: Vec3Array to compute euclidean distance from, should be
broadcast compatible with 'vec1'
epsilon: distance is clipped from below to be at least epsilon
Returns:
Array of euclidean distances;
shape will be result of broadcasting 'vec1' and 'vec2'
"""
distance_sq = square_euclidean_distance(vec1, vec2, epsilon**2)
distance = jnp.sqrt(distance_sq)
return distance
def dihedral_angle(a: Vec3Array, b: Vec3Array, c: Vec3Array,
d: Vec3Array) -> Float:
"""Computes torsion angle for a quadruple of points.
For points (a, b, c, d), this is the angle between the planes defined by
points (a, b, c) and (b, c, d). It is also known as the dihedral angle.
Arguments:
a: A Vec3Array of coordinates.
b: A Vec3Array of coordinates.
c: A Vec3Array of coordinates.
d: A Vec3Array of coordinates.
Returns:
A tensor of angles in radians: [-pi, pi].
"""
v1 = a - b
v2 = b - c
v3 = d - c
c1 = v1.cross(v2)
c2 = v3.cross(v2)
c3 = c2.cross(c1)
v2_mag = v2.norm()
return jnp.arctan2(c3.dot(v2), v2_mag * c1.dot(c2))
def random_gaussian_vector(shape, key, dtype=jnp.float32):
vec_array = jax.random.normal(key, shape + (3,), dtype)
return Vec3Array.from_array(vec_array)
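# Editor's sketch (not part of the original module): the four points below
# are an illustrative assumption, chosen so that the expected torsion angle
# is +pi/2.
if __name__ == '__main__':
  a = Vec3Array.from_array(jnp.array([0., 1., 0.]))
  b = Vec3Array.from_array(jnp.array([0., 0., 0.]))
  c = Vec3Array.from_array(jnp.array([1., 0., 0.]))
  d = Vec3Array.from_array(jnp.array([1., 0., 1.]))
  np.testing.assert_allclose(dihedral_angle(a, b, c, d), jnp.pi / 2, atol=1e-6)
  # Norms and distances are clipped from below at epsilon, so the distance
  # of a point to itself comes out as epsilon rather than exactly zero.
  np.testing.assert_allclose(euclidean_distance(a, a), 1e-6, atol=1e-9)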
@@ -19,6 +19,7 @@ from absl import logging
 from alphafold.common import confidence
 from alphafold.model import features
 from alphafold.model import modules
+from alphafold.model import modules_multimer
 import haiku as hk
 import jax
 import ml_collections
@@ -28,19 +29,34 @@ import tree
 def get_confidence_metrics(
-    prediction_result: Mapping[str, Any]) -> Mapping[str, Any]:
+    prediction_result: Mapping[str, Any],
+    multimer_mode: bool) -> Mapping[str, Any]:
   """Post processes prediction_result to get confidence metrics."""
   confidence_metrics = {}
   confidence_metrics['plddt'] = confidence.compute_plddt(
       prediction_result['predicted_lddt']['logits'])
   if 'predicted_aligned_error' in prediction_result:
     confidence_metrics.update(confidence.compute_predicted_aligned_error(
-        prediction_result['predicted_aligned_error']['logits'],
-        prediction_result['predicted_aligned_error']['breaks']))
+        logits=prediction_result['predicted_aligned_error']['logits'],
+        breaks=prediction_result['predicted_aligned_error']['breaks']))
     confidence_metrics['ptm'] = confidence.predicted_tm_score(
-        prediction_result['predicted_aligned_error']['logits'],
-        prediction_result['predicted_aligned_error']['breaks'])
+        logits=prediction_result['predicted_aligned_error']['logits'],
+        breaks=prediction_result['predicted_aligned_error']['breaks'],
+        asym_id=None)
+    if multimer_mode:
+      # Compute the ipTM only for the multimer model.
+      confidence_metrics['iptm'] = confidence.predicted_tm_score(
+          logits=prediction_result['predicted_aligned_error']['logits'],
+          breaks=prediction_result['predicted_aligned_error']['breaks'],
+          asym_id=prediction_result['predicted_aligned_error']['asym_id'],
+          interface=True)
+      confidence_metrics['ranking_confidence'] = (
+          0.8 * confidence_metrics['iptm'] + 0.2 * confidence_metrics['ptm'])
+  if not multimer_mode:
+    # Monomer models use mean pLDDT for model ranking.
+    confidence_metrics['ranking_confidence'] = np.mean(
+        confidence_metrics['plddt'])
   return confidence_metrics
@@ -53,14 +69,22 @@ class RunModel:
               params: Optional[Mapping[str, Mapping[str, np.ndarray]]] = None):
     self.config = config
     self.params = params
+    self.multimer_mode = config.model.global_config.multimer_mode
-    def _forward_fn(batch):
-      model = modules.AlphaFold(self.config.model)
-      return model(
-          batch,
-          is_training=False,
-          compute_loss=False,
-          ensemble_representations=True)
+    if self.multimer_mode:
+      def _forward_fn(batch):
+        model = modules_multimer.AlphaFold(self.config.model)
+        return model(
+            batch,
+            is_training=False)
+    else:
+      def _forward_fn(batch):
+        model = modules.AlphaFold(self.config.model)
+        return model(
+            batch,
+            is_training=False,
+            compute_loss=False,
+            ensemble_representations=True)
     self.apply = jax.jit(hk.transform(_forward_fn).apply)
     self.init = jax.jit(hk.transform(_forward_fn).init)
@@ -98,6 +122,11 @@ class RunModel:
     Returns:
       A dict of NumPy feature arrays suitable for feeding into the model.
     """
+    if self.multimer_mode:
+      return raw_features
+    # Single-chain mode.
     if isinstance(raw_features, dict):
       return features.np_example_to_features(
           np_example=raw_features,
@@ -117,12 +146,17 @@ class RunModel:
       logging.info('Output shape was %s', shape)
       return shape
-  def predict(self, feat: features.FeatureDict) -> Mapping[str, Any]:
+  def predict(self,
+              feat: features.FeatureDict,
+              random_seed: int,
+              ) -> Mapping[str, Any]:
     """Makes a prediction by inferencing the model on the provided features.
     Args:
       feat: A dictionary of NumPy feature arrays as output by
        RunModel.process_features.
+      random_seed: The random seed to use when running the model. In the
+        multimer model this controls the MSA sampling.
     Returns:
       A dictionary of model outputs.
@@ -130,12 +164,14 @@ class RunModel:
     self.init_params(feat)
     logging.info('Running predict with shape(feat) = %s',
                  tree.map_structure(lambda x: x.shape, feat))
-    result = self.apply(self.params, jax.random.PRNGKey(0), feat)
+    result = self.apply(self.params, jax.random.PRNGKey(random_seed), feat)
     # This block is to ensure benchmark timings are accurate. Some blocking is
     # already happening when computing get_confidence_metrics, and this ensures
     # all outputs are blocked on.
     jax.tree_map(lambda x: x.block_until_ready(), result)
-    result.update(get_confidence_metrics(result))
+    result.update(
+        get_confidence_metrics(result, multimer_mode=self.multimer_mode))
     logging.info('Output shape was %s',
                  tree.map_structure(lambda x: x.shape, result))
     return result
@@ -965,6 +965,11 @@ class MaskedMsaHead(hk.Module):
     self.config = config
     self.global_config = global_config
+    if global_config.multimer_mode:
+      self.num_output = len(residue_constants.restypes_with_x_and_gap)
+    else:
+      self.num_output = config.num_output
   def __call__(self, representations, batch, is_training):
     """Builds MaskedMsaHead module.
@@ -981,7 +986,7 @@ class MaskedMsaHead(hk.Module):
     """
     del batch
     logits = common_modules.Linear(
-        self.config.num_output,
+        self.num_output,
         initializer=utils.final_init(self.global_config),
         name='logits')(
            representations['msa'])
@@ -989,7 +994,7 @@ class MaskedMsaHead(hk.Module):
   def loss(self, value, batch):
     errors = softmax_cross_entropy(
-        labels=jax.nn.one_hot(batch['true_msa'], num_classes=23),
+        labels=jax.nn.one_hot(batch['true_msa'], num_classes=self.num_output),
         logits=value['logits'])
     loss = (jnp.sum(errors * batch['bert_mask'], axis=(-2, -1)) /
             (1e-8 + jnp.sum(batch['bert_mask'], axis=(-2, -1))))
@@ -1009,7 +1014,7 @@ class PredictedLDDTHead(hk.Module):
     self.global_config = global_config
   def __call__(self, representations, batch, is_training):
-    """Builds ExperimentallyResolvedHead module.
+    """Builds PredictedLDDTHead module.
     Arguments:
       representations: Dictionary of representations, must contain:
@@ -1071,7 +1076,7 @@ class PredictedLDDTHead(hk.Module):
         # Shape (batch_size, num_res, 1)
         true_points_mask=all_atom_mask[None, :, 1:2].astype(jnp.float32),
         cutoff=15.,
-        per_residue=True)[0]
+        per_residue=True)
     lddt_ca = jax.lax.stop_gradient(lddt_ca)
     num_bins = self.config.num_bins
@@ -1597,6 +1602,19 @@ class EvoformerIteration(hk.Module):
     safe_key, *sub_keys = safe_key.split(10)
     sub_keys = iter(sub_keys)
+    outer_module = OuterProductMean(
+        config=c.outer_product_mean,
+        global_config=self.global_config,
+        num_output_channel=int(pair_act.shape[-1]),
+        name='outer_product_mean')
+    if c.outer_product_mean.first:
+      pair_act = dropout_wrapper_fn(
+          outer_module,
+          msa_act,
+          msa_mask,
+          safe_key=next(sub_keys),
+          output_act=pair_act)
     msa_act = dropout_wrapper_fn(
         MSARowAttentionWithPairBias(
             c.msa_row_attention_with_pair_bias, gc,
@@ -1624,16 +1642,13 @@ class EvoformerIteration(hk.Module):
         msa_mask,
         safe_key=next(sub_keys))
-    pair_act = dropout_wrapper_fn(
-        OuterProductMean(
-            config=c.outer_product_mean,
-            global_config=self.global_config,
-            num_output_channel=int(pair_act.shape[-1]),
-            name='outer_product_mean'),
-        msa_act,
-        msa_mask,
-        safe_key=next(sub_keys),
-        output_act=pair_act)
+    if not c.outer_product_mean.first:
+      pair_act = dropout_wrapper_fn(
+          outer_module,
+          msa_act,
+          msa_mask,
+          safe_key=next(sub_keys),
+          output_act=pair_act)
     pair_act = dropout_wrapper_fn(
         TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
@@ -1730,8 +1745,7 @@ class EmbeddingsAndEvoformer(hk.Module):
             True,
             name='prev_msa_first_row_norm')(
                batch['prev_msa_first_row'])
-      msa_activations = jax.ops.index_add(msa_activations, 0,
-                                          prev_msa_first_row)
+      msa_activations = msa_activations.at[0].add(prev_msa_first_row)
     if 'prev_pair' in batch:
       pair_activations += hk.LayerNorm([-1],
...
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core modules, which have been refactored in AlphaFold-Multimer.
The main difference is that the MSA sampling pipeline is moved inside the
JAX model for easier implementation of recycling and ensembling.
Lower-level modules up to EvoformerIteration are reused from modules.py.
"""
import functools
from typing import Sequence
from alphafold.common import residue_constants
from alphafold.model import all_atom_multimer
from alphafold.model import common_modules
from alphafold.model import folding_multimer
from alphafold.model import geometry
from alphafold.model import layer_stack
from alphafold.model import modules
from alphafold.model import prng
from alphafold.model import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def reduce_fn(x, mode):
if mode == 'none' or mode is None:
return jnp.asarray(x)
elif mode == 'sum':
return jnp.asarray(x).sum()
elif mode == 'mean':
return jnp.mean(jnp.asarray(x))
else:
raise ValueError('Unsupported reduction option.')
def gumbel_noise(key: jnp.ndarray, shape: Sequence[int]) -> jnp.ndarray:
"""Generate Gumbel Noise of given Shape.
This generates samples from Gumbel(0, 1).
Args:
key: Jax random number key.
shape: Shape of noise to return.
Returns:
Gumbel noise of given shape.
"""
epsilon = 1e-6
uniform = utils.padding_consistent_rng(jax.random.uniform)
uniform_noise = uniform(
key, shape=shape, dtype=jnp.float32, minval=0., maxval=1.)
gumbel = -jnp.log(-jnp.log(uniform_noise + epsilon) + epsilon)
return gumbel
def gumbel_max_sample(key: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray:
"""Samples from a probability distribution given by 'logits'.
This uses the Gumbel-max trick to implement the sampling in an efficient
manner.
Args:
key: prng key.
logits: Logarithm of probabilities to sample from, probabilities can be
unnormalized.
Returns:
Sample from logprobs in one-hot form.
"""
z = gumbel_noise(key, logits.shape)
return jax.nn.one_hot(
jnp.argmax(logits + z, axis=-1),
logits.shape[-1],
dtype=logits.dtype)
def gumbel_argsort_sample_idx(key: jnp.ndarray,
logits: jnp.ndarray) -> jnp.ndarray:
"""Samples with replacement from a distribution given by 'logits'.
This uses Gumbel trick to implement the sampling an efficient manner. For a
distribution over k items this samples k times without replacement, so this
is effectively sampling a random permutation with probabilities over the
permutations derived from the logprobs.
Args:
key: prng key.
logits: Logarithm of probabilities to sample from, probabilities can be
unnormalized.
Returns:
Sample from logprobs in one-hot form.
"""
z = gumbel_noise(key, logits.shape)
# This construction is equivalent to jnp.argsort, but uses a non-stable sort,
# since stable sorts aren't supported by jax2tf.
axis = len(logits.shape) - 1
iota = jax.lax.broadcasted_iota(jnp.int64, logits.shape, axis)
_, perm = jax.lax.sort_key_val(
logits + z, iota, dimension=-1, is_stable=False)
return perm[::-1]
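# Editor's sketch (not part of the original module): a small demo of the two
# Gumbel samplers above. The seed and the probabilities are illustrative
# assumptions. Adding Gumbel(0, 1) noise to logits and taking the argmax
# draws an exact sample from softmax(logits); argsorting the noisy logits
# instead draws a whole permutation, which sample_msa below uses to pick MSA
# rows without replacement.
def _gumbel_sampling_demo(seed: int = 0):
  key1, key2 = jax.random.split(jax.random.PRNGKey(seed))
  logits = jnp.log(jnp.array([0.7, 0.2, 0.1]))
  one_hot_sample = gumbel_max_sample(key1, logits)  # Shape (3,), one-hot.
  perm = gumbel_argsort_sample_idx(key2, logits)  # Permutation of 0..2.
  return one_hot_sample, perm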
def make_masked_msa(batch, key, config, epsilon=1e-6):
"""Create data for BERT on raw MSA."""
# Add a random amino acid uniformly.
random_aa = jnp.array([0.05] * 20 + [0., 0.], dtype=jnp.float32)
categorical_probs = (
config.uniform_prob * random_aa +
config.profile_prob * batch['msa_profile'] +
config.same_prob * jax.nn.one_hot(batch['msa'], 22))
# Put all remaining probability on [MASK] which is a new column.
pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))]
pad_shapes[-1][1] = 1
mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob
assert mask_prob >= 0.
categorical_probs = jnp.pad(
categorical_probs, pad_shapes, constant_values=mask_prob)
sh = batch['msa'].shape
key, mask_subkey, gumbel_subkey = key.split(3)
uniform = utils.padding_consistent_rng(jax.random.uniform)
mask_position = uniform(mask_subkey.get(), sh) < config.replace_fraction
mask_position *= batch['msa_mask']
logits = jnp.log(categorical_probs + epsilon)
bert_msa = gumbel_max_sample(gumbel_subkey.get(), logits)
bert_msa = jnp.where(mask_position,
jnp.argmax(bert_msa, axis=-1), batch['msa'])
bert_msa *= batch['msa_mask']
# Mix real and masked MSA.
if 'bert_mask' in batch:
batch['bert_mask'] *= mask_position.astype(jnp.float32)
else:
batch['bert_mask'] = mask_position.astype(jnp.float32)
batch['true_msa'] = batch['msa']
batch['msa'] = bert_msa
return batch
def nearest_neighbor_clusters(batch, gap_agreement_weight=0.):
"""Assign each extra MSA sequence to its nearest neighbor in sampled MSA."""
# Determine how much weight we assign to each agreement. In theory, we could
# use a full BLOSUM matrix here, but for now we just down-weight gap
# agreement because it could be spurious.
# Never put weight on agreeing on BERT mask.
weights = jnp.array(
[1.] * 21 + [gap_agreement_weight] + [0.], dtype=jnp.float32)
msa_mask = batch['msa_mask']
msa_one_hot = jax.nn.one_hot(batch['msa'], 23)
extra_mask = batch['extra_msa_mask']
extra_one_hot = jax.nn.one_hot(batch['extra_msa'], 23)
msa_one_hot_masked = msa_mask[:, :, None] * msa_one_hot
extra_one_hot_masked = extra_mask[:, :, None] * extra_one_hot
agreement = jnp.einsum('mrc, nrc->nm', extra_one_hot_masked,
weights * msa_one_hot_masked)
cluster_assignment = jax.nn.softmax(1e3 * agreement, axis=0)
cluster_assignment *= jnp.einsum('mr, nr->mn', msa_mask, extra_mask)
cluster_count = jnp.sum(cluster_assignment, axis=-1)
cluster_count += 1. # We always include the sequence itself.
msa_sum = jnp.einsum('nm, mrc->nrc', cluster_assignment, extra_one_hot_masked)
msa_sum += msa_one_hot_masked
cluster_profile = msa_sum / cluster_count[:, None, None]
extra_deletion_matrix = batch['extra_deletion_matrix']
deletion_matrix = batch['deletion_matrix']
del_sum = jnp.einsum('nm, mc->nc', cluster_assignment,
extra_mask * extra_deletion_matrix)
del_sum += deletion_matrix # Original sequence.
cluster_deletion_mean = del_sum / cluster_count[:, None]
return cluster_profile, cluster_deletion_mean
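# Editor's note (illustrative, not part of the original module): the first
# einsum above scores each extra sequence m against each sampled sequence n
# by counting weighted one-hot agreements over residues; the softmax with a
# large multiplier (1e3) then makes a near-hard assignment of each extra
# sequence to its best-matching cluster, and the remaining einsums average
# the assigned sequences and deletion counts into per-cluster profiles.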
def create_msa_feat(batch):
"""Create and concatenate MSA features."""
msa_1hot = jax.nn.one_hot(batch['msa'], 23)
deletion_matrix = batch['deletion_matrix']
has_deletion = jnp.clip(deletion_matrix, 0., 1.)[..., None]
deletion_value = (jnp.arctan(deletion_matrix / 3.) * (2. / jnp.pi))[..., None]
deletion_mean_value = (jnp.arctan(batch['cluster_deletion_mean'] / 3.) *
(2. / jnp.pi))[..., None]
msa_feat = [
msa_1hot,
has_deletion,
deletion_value,
batch['cluster_profile'],
deletion_mean_value
]
return jnp.concatenate(msa_feat, axis=-1)
def create_extra_msa_feature(batch, num_extra_msa):
"""Expand extra_msa into 1hot and concat with other extra msa features.
We do this as late as possible as the one_hot extra msa can be very large.
Args:
batch: a dictionary with the following keys:
* 'extra_msa': [num_seq, num_res] MSA that wasn't selected as a cluster
centre. Note - This isn't one-hotted.
* 'extra_deletion_matrix': [num_seq, num_res] Number of deletions at given
position.
num_extra_msa: Number of extra msa to use.
Returns:
Concatenated tensor of extra MSA features.
"""
# 23 = 20 amino acids + 'X' for unknown + gap + bert mask
extra_msa = batch['extra_msa'][:num_extra_msa]
deletion_matrix = batch['extra_deletion_matrix'][:num_extra_msa]
msa_1hot = jax.nn.one_hot(extra_msa, 23)
has_deletion = jnp.clip(deletion_matrix, 0., 1.)[..., None]
deletion_value = (jnp.arctan(deletion_matrix / 3.) * (2. / jnp.pi))[..., None]
extra_msa_mask = batch['extra_msa_mask'][:num_extra_msa]
return jnp.concatenate([msa_1hot, has_deletion, deletion_value],
axis=-1), extra_msa_mask
def sample_msa(key, batch, max_seq):
"""Sample MSA randomly, remaining sequences are stored as `extra_*`.
Args:
key: safe key for random number generation.
batch: batch to sample msa from.
max_seq: number of sequences to sample.
Returns:
Protein with sampled msa.
"""
# Sample uniformly among sequences with at least one non-masked position.
logits = (jnp.clip(jnp.sum(batch['msa_mask'], axis=-1), 0., 1.) - 1.) * 1e6
# The cluster_bias_mask can be used to preserve the first row (target
# sequence) for each chain, for example.
if 'cluster_bias_mask' not in batch:
cluster_bias_mask = jnp.pad(
jnp.zeros(batch['msa'].shape[0] - 1), (1, 0), constant_values=1.)
else:
cluster_bias_mask = batch['cluster_bias_mask']
logits += cluster_bias_mask * 1e6
index_order = gumbel_argsort_sample_idx(key.get(), logits)
sel_idx = index_order[:max_seq]
extra_idx = index_order[max_seq:]
for k in ['msa', 'deletion_matrix', 'msa_mask', 'bert_mask']:
if k in batch:
batch['extra_' + k] = batch[k][extra_idx]
batch[k] = batch[k][sel_idx]
return batch
def make_msa_profile(batch):
"""Compute the MSA profile."""
# Compute the profile for every residue (over all MSA sequences).
return utils.mask_mean(
batch['msa_mask'][:, :, None], jax.nn.one_hot(batch['msa'], 22), axis=0)
class AlphaFoldIteration(hk.Module):
"""A single recycling iteration of AlphaFold architecture.
Computes ensembled (averaged) representations from the provided features.
These representations are then passed to the various heads
that have been requested by the configuration file.
"""
def __init__(self, config, global_config, name='alphafold_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
batch,
is_training,
return_representations=False,
safe_key=None):
if is_training:
num_ensemble = np.asarray(self.config.num_ensemble_train)
else:
num_ensemble = np.asarray(self.config.num_ensemble_eval)
# Compute representations for each MSA sample and average.
embedding_module = EmbeddingsAndEvoformer(
self.config.embeddings_and_evoformer, self.global_config)
repr_shape = hk.eval_shape(
lambda: embedding_module(batch, is_training))
representations = {
k: jnp.zeros(v.shape, v.dtype) for (k, v) in repr_shape.items()
}
def ensemble_body(x, unused_y):
"""Add into representations ensemble."""
del unused_y
representations, safe_key = x
safe_key, safe_subkey = safe_key.split()
representations_update = embedding_module(
batch, is_training, safe_key=safe_subkey)
for k in representations:
if k not in {'msa', 'true_msa', 'bert_mask'}:
representations[k] += representations_update[k] * (
1. / num_ensemble).astype(representations[k].dtype)
else:
representations[k] = representations_update[k]
return (representations, safe_key), None
(representations, _), _ = hk.scan(
ensemble_body, (representations, safe_key), None, length=num_ensemble)
self.representations = representations
self.batch = batch
self.heads = {}
for head_name, head_config in sorted(self.config.heads.items()):
if not head_config.weight:
continue # Do not instantiate zero-weight heads.
head_factory = {
'masked_msa':
modules.MaskedMsaHead,
'distogram':
modules.DistogramHead,
'structure_module':
folding_multimer.StructureModule,
'predicted_aligned_error':
modules.PredictedAlignedErrorHead,
'predicted_lddt':
modules.PredictedLDDTHead,
'experimentally_resolved':
modules.ExperimentallyResolvedHead,
}[head_name]
self.heads[head_name] = (head_config,
head_factory(head_config, self.global_config))
structure_module_output = None
if 'entity_id' in batch and 'all_atom_positions' in batch:
_, fold_module = self.heads['structure_module']
structure_module_output = fold_module(representations, batch, is_training)
ret = {}
ret['representations'] = representations
for name, (head_config, module) in self.heads.items():
if name == 'structure_module' and structure_module_output is not None:
ret[name] = structure_module_output
representations['structure_module'] = structure_module_output.pop('act')
# Skip confidence heads until StructureModule is executed.
elif name in {'predicted_lddt', 'predicted_aligned_error',
'experimentally_resolved'}:
continue
else:
ret[name] = module(representations, batch, is_training)
# Add confidence heads after StructureModule is executed.
if self.config.heads.get('predicted_lddt.weight', 0.0):
name = 'predicted_lddt'
head_config, module = self.heads[name]
ret[name] = module(representations, batch, is_training)
if self.config.heads.experimentally_resolved.weight:
name = 'experimentally_resolved'
head_config, module = self.heads[name]
ret[name] = module(representations, batch, is_training)
if self.config.heads.get('predicted_aligned_error.weight', 0.0):
name = 'predicted_aligned_error'
head_config, module = self.heads[name]
ret[name] = module(representations, batch, is_training)
# Will be used for ipTM computation.
ret[name]['asym_id'] = batch['asym_id']
return ret
class AlphaFold(hk.Module):
"""AlphaFold-Multimer model with recycling.
"""
def __init__(self, config, name='alphafold'):
super().__init__(name=name)
self.config = config
self.global_config = config.global_config
def __call__(
self,
batch,
is_training,
return_representations=False,
safe_key=None):
c = self.config
impl = AlphaFoldIteration(c, self.global_config)
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
elif isinstance(safe_key, jnp.ndarray):
safe_key = prng.SafeKey(safe_key)
assert isinstance(batch, dict)
num_res = batch['aatype'].shape[0]
def get_prev(ret):
new_prev = {
'prev_pos':
ret['structure_module']['final_atom_positions'],
'prev_msa_first_row': ret['representations']['msa_first_row'],
'prev_pair': ret['representations']['pair'],
}
return jax.tree_map(jax.lax.stop_gradient, new_prev)
def apply_network(prev, safe_key):
recycled_batch = {**batch, **prev}
return impl(
batch=recycled_batch,
is_training=is_training,
safe_key=safe_key)
if self.config.num_recycle:
emb_config = self.config.embeddings_and_evoformer
prev = {
'prev_pos':
jnp.zeros([num_res, residue_constants.atom_type_num, 3]),
'prev_msa_first_row':
jnp.zeros([num_res, emb_config.msa_channel]),
'prev_pair':
jnp.zeros([num_res, num_res, emb_config.pair_channel]),
}
if 'num_iter_recycling' in batch:
# Training time: num_iter_recycling is in batch.
# Value for each ensemble batch is the same, so arbitrarily taking 0-th.
num_iter = batch['num_iter_recycling'][0]
# Ensure that, even when ensembling, we will not run more recycling
# iterations than the model is configured to run.
num_iter = jnp.minimum(num_iter, c.num_recycle)
else:
# Eval mode or tests: use the maximum number of iterations.
num_iter = c.num_recycle
def recycle_body(i, x):
del i
prev, safe_key = x
safe_key1, safe_key2 = safe_key.split() if c.resample_msa_in_recycling else safe_key.duplicate() # pylint: disable=line-too-long
ret = apply_network(prev=prev, safe_key=safe_key2)
return get_prev(ret), safe_key1
prev, safe_key = hk.fori_loop(0, num_iter, recycle_body, (prev, safe_key))
else:
prev = {}
# Run extra iteration.
ret = apply_network(prev=prev, safe_key=safe_key)
if not return_representations:
del ret['representations']
return ret
class EmbeddingsAndEvoformer(hk.Module):
"""Embeds the input data and runs Evoformer.
Produces the MSA, single and pair representations.
"""
def __init__(self, config, global_config, name='evoformer'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def _relative_encoding(self, batch):
"""Add relative position encodings.
For position (i, j), the value is (i-j) clipped to [-k, k] and one-hotted.
When not using 'use_chain_relative' the residue indices are used as is, e.g.
for heteromers relative positions will be computed using the positions in
the corresponding chains.
When using 'use_chain_relative' we add an extra bin that denotes
'different chain'. Furthermore we also provide the relative chain index
(i.e. sym_id) clipped and one-hotted, as well as an extra feature denoting
whether the two residues belong to the same chain type, i.e. it is 0 if
they are in different heteromer chains and 1 otherwise.
Args:
batch: batch.
Returns:
Feature embedding using the features as described before.
"""
c = self.config
rel_feats = []
pos = batch['residue_index']
asym_id = batch['asym_id']
asym_id_same = jnp.equal(asym_id[:, None], asym_id[None, :])
offset = pos[:, None] - pos[None, :]
clipped_offset = jnp.clip(
offset + c.max_relative_idx, a_min=0, a_max=2 * c.max_relative_idx)
if c.use_chain_relative:
final_offset = jnp.where(asym_id_same, clipped_offset,
(2 * c.max_relative_idx + 1) *
jnp.ones_like(clipped_offset))
rel_pos = jax.nn.one_hot(final_offset, 2 * c.max_relative_idx + 2)
rel_feats.append(rel_pos)
entity_id = batch['entity_id']
entity_id_same = jnp.equal(entity_id[:, None], entity_id[None, :])
rel_feats.append(entity_id_same.astype(rel_pos.dtype)[..., None])
sym_id = batch['sym_id']
rel_sym_id = sym_id[:, None] - sym_id[None, :]
max_rel_chain = c.max_relative_chain
clipped_rel_chain = jnp.clip(
rel_sym_id + max_rel_chain, a_min=0, a_max=2 * max_rel_chain)
final_rel_chain = jnp.where(entity_id_same, clipped_rel_chain,
(2 * max_rel_chain + 1) *
jnp.ones_like(clipped_rel_chain))
rel_chain = jax.nn.one_hot(final_rel_chain, 2 * c.max_relative_chain + 2)
rel_feats.append(rel_chain)
else:
rel_pos = jax.nn.one_hot(clipped_offset, 2 * c.max_relative_idx + 1)
rel_feats.append(rel_pos)
rel_feat = jnp.concatenate(rel_feats, axis=-1)
return common_modules.Linear(
c.pair_channel,
name='position_activations')(
rel_feat)
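  # Editor's note (illustrative sketch, not part of the original module): for
  # the encoding above with max_relative_idx k = 32, an offset i - j of -40
  # clips to bin 0, an offset of 0 maps to the middle bin k, and +40 clips to
  # bin 2k; with 'use_chain_relative', every inter-chain pair instead maps to
  # the extra bin 2k + 1, so rel_pos is one-hot over 2k + 2 classes.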
def __call__(self, batch, is_training, safe_key=None):
c = self.config
gc = self.global_config
batch = dict(batch)
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
output = {}
batch['msa_profile'] = make_msa_profile(batch)
target_feat = jax.nn.one_hot(batch['aatype'], 21)
preprocess_1d = common_modules.Linear(
c.msa_channel, name='preprocess_1d')(
target_feat)
safe_key, sample_key, mask_key = safe_key.split(3)
batch = sample_msa(sample_key, batch, c.num_msa)
batch = make_masked_msa(batch, mask_key, c.masked_msa)
(batch['cluster_profile'],
batch['cluster_deletion_mean']) = nearest_neighbor_clusters(batch)
msa_feat = create_msa_feat(batch)
preprocess_msa = common_modules.Linear(
c.msa_channel, name='preprocess_msa')(
msa_feat)
msa_activations = jnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
left_single = common_modules.Linear(
c.pair_channel, name='left_single')(
target_feat)
right_single = common_modules.Linear(
c.pair_channel, name='right_single')(
target_feat)
pair_activations = left_single[:, None] + right_single[None]
mask_2d = batch['seq_mask'][:, None] * batch['seq_mask'][None, :]
mask_2d = mask_2d.astype(jnp.float32)
if c.recycle_pos and 'prev_pos' in batch:
prev_pseudo_beta = modules.pseudo_beta_fn(
batch['aatype'], batch['prev_pos'], None)
dgram = modules.dgram_from_positions(
prev_pseudo_beta, **self.config.prev_pos)
pair_activations += common_modules.Linear(
c.pair_channel, name='prev_pos_linear')(
dgram)
if c.recycle_features:
if 'prev_msa_first_row' in batch:
prev_msa_first_row = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='prev_msa_first_row_norm')(
batch['prev_msa_first_row'])
msa_activations = msa_activations.at[0].add(prev_msa_first_row)
if 'prev_pair' in batch:
pair_activations += hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='prev_pair_norm')(
batch['prev_pair'])
if c.max_relative_idx:
pair_activations += self._relative_encoding(batch)
if c.template.enabled:
template_module = TemplateEmbedding(c.template, gc)
template_batch = {
'template_aatype': batch['template_aatype'],
'template_all_atom_positions': batch['template_all_atom_positions'],
'template_all_atom_mask': batch['template_all_atom_mask']
}
# Construct a mask such that only intra-chain template features are
# computed, since templates are provided for each chain individually.
multichain_mask = batch['asym_id'][:, None] == batch['asym_id'][None, :]
safe_key, safe_subkey = safe_key.split()
template_act = template_module(
query_embedding=pair_activations,
template_batch=template_batch,
padding_mask_2d=mask_2d,
multichain_mask_2d=multichain_mask,
is_training=is_training,
safe_key=safe_subkey)
pair_activations += template_act
# Extra MSA stack.
(extra_msa_feat,
extra_msa_mask) = create_extra_msa_feature(batch, c.num_extra_msa)
extra_msa_activations = common_modules.Linear(
c.extra_msa_channel,
name='extra_msa_activations')(
extra_msa_feat)
extra_msa_mask = extra_msa_mask.astype(jnp.float32)
extra_evoformer_input = {
'msa': extra_msa_activations,
'pair': pair_activations,
}
extra_masks = {'msa': extra_msa_mask, 'pair': mask_2d}
extra_evoformer_iteration = modules.EvoformerIteration(
c.evoformer, gc, is_extra_msa=True, name='extra_msa_stack')
def extra_evoformer_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
extra_evoformer_output = extra_evoformer_iteration(
activations=act,
masks=extra_masks,
is_training=is_training,
safe_key=safe_subkey)
return (extra_evoformer_output, safe_key)
if gc.use_remat:
extra_evoformer_fn = hk.remat(extra_evoformer_fn)
safe_key, safe_subkey = safe_key.split()
extra_evoformer_stack = layer_stack.layer_stack(
c.extra_msa_stack_num_block)(
extra_evoformer_fn)
extra_evoformer_output, safe_key = extra_evoformer_stack(
(extra_evoformer_input, safe_subkey))
pair_activations = extra_evoformer_output['pair']
# Get the size of the MSA before potentially adding templates, so we
# can crop out the templates later.
num_msa_sequences = msa_activations.shape[0]
evoformer_input = {
'msa': msa_activations,
'pair': pair_activations,
}
evoformer_masks = {'msa': batch['msa_mask'].astype(jnp.float32),
'pair': mask_2d}
if c.template.enabled:
template_features, template_masks = (
template_embedding_1d(batch=batch, num_channel=c.msa_channel))
evoformer_input['msa'] = jnp.concatenate(
[evoformer_input['msa'], template_features], axis=0)
evoformer_masks['msa'] = jnp.concatenate(
[evoformer_masks['msa'], template_masks], axis=0)
evoformer_iteration = modules.EvoformerIteration(
c.evoformer, gc, is_extra_msa=False, name='evoformer_iteration')
def evoformer_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
evoformer_output = evoformer_iteration(
activations=act,
masks=evoformer_masks,
is_training=is_training,
safe_key=safe_subkey)
return (evoformer_output, safe_key)
if gc.use_remat:
evoformer_fn = hk.remat(evoformer_fn)
safe_key, safe_subkey = safe_key.split()
evoformer_stack = layer_stack.layer_stack(c.evoformer_num_block)(
evoformer_fn)
def run_evoformer(evoformer_input):
evoformer_output, _ = evoformer_stack((evoformer_input, safe_subkey))
return evoformer_output
evoformer_output = run_evoformer(evoformer_input)
msa_activations = evoformer_output['msa']
pair_activations = evoformer_output['pair']
single_activations = common_modules.Linear(
c.seq_channel, name='single_activations')(
msa_activations[0])
output.update({
'single':
single_activations,
'pair':
pair_activations,
# Crop away template rows such that they are not used in MaskedMsaHead.
'msa':
msa_activations[:num_msa_sequences, :, :],
'msa_first_row':
msa_activations[0],
})
return output
class TemplateEmbedding(hk.Module):
"""Embed a set of templates."""
def __init__(self, config, global_config, name='template_embedding'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, query_embedding, template_batch, padding_mask_2d,
multichain_mask_2d, is_training,
safe_key=None):
"""Generate an embedding for a set of templates.
Args:
query_embedding: [num_res, num_res, num_channel] a query tensor that will
be used to attend over the templates to remove the num_templates
dimension.
template_batch: A dictionary containing:
`template_aatype`: [num_templates, num_res] aatype for each template.
`template_all_atom_positions`: [num_templates, num_res, 37, 3] atom
positions for all templates.
`template_all_atom_mask`: [num_templates, num_res, 37] mask for each
template.
padding_mask_2d: [num_res, num_res] Pair mask for attention operations.
multichain_mask_2d: [num_res, num_res] Mask indicating which residue pairs
are intra-chain, used to mask out residue distance based features
between chains.
is_training: bool indicating whether we are running in training mode.
safe_key: random key generator.
Returns:
An embedding of size [num_res, num_res, num_channels]
"""
c = self.config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
num_templates = template_batch['template_aatype'].shape[0]
num_res, _, query_num_channels = query_embedding.shape
# Embed each template separately.
template_embedder = SingleTemplateEmbedding(self.config, self.global_config)
def partial_template_embedder(template_aatype,
template_all_atom_positions,
template_all_atom_mask,
unsafe_key):
safe_key = prng.SafeKey(unsafe_key)
return template_embedder(query_embedding,
template_aatype,
template_all_atom_positions,
template_all_atom_mask,
padding_mask_2d,
multichain_mask_2d,
is_training,
safe_key)
safe_key, unsafe_key = safe_key.split()
unsafe_keys = jax.random.split(unsafe_key._key, num_templates)
def scan_fn(carry, x):
return carry + partial_template_embedder(*x), None
scan_init = jnp.zeros((num_res, num_res, c.num_channels),
dtype=query_embedding.dtype)
summed_template_embeddings, _ = hk.scan(
scan_fn, scan_init,
(template_batch['template_aatype'],
template_batch['template_all_atom_positions'],
template_batch['template_all_atom_mask'], unsafe_keys))
embedding = summed_template_embeddings / num_templates
embedding = jax.nn.relu(embedding)
embedding = common_modules.Linear(
query_num_channels,
initializer='relu',
name='output_linear')(embedding)
return embedding
class SingleTemplateEmbedding(hk.Module):
"""Embed a single template."""
def __init__(self, config, global_config, name='single_template_embedding'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, query_embedding, template_aatype,
template_all_atom_positions, template_all_atom_mask,
padding_mask_2d, multichain_mask_2d, is_training,
safe_key):
"""Build the single template embedding graph.
Args:
query_embedding: (num_res, num_res, num_channels) - embedding of the
query sequence/msa.
template_aatype: [num_res] aatype for the template.
template_all_atom_positions: [num_res, 37, 3] atom positions for the
template.
template_all_atom_mask: [num_res, 37] mask for the template.
padding_mask_2d: Padding mask (Note: this doesn't care if a template
exists, unlike the template_pseudo_beta_mask).
multichain_mask_2d: A mask indicating intra-chain residue pairs, used
to mask out between chain distances/features when templates are for
single chains.
is_training: Whether we are in training mode.
safe_key: Random key generator.
Returns:
A template embedding (num_res, num_res, num_channels).
"""
gc = self.global_config
c = self.config
assert padding_mask_2d.dtype == query_embedding.dtype
dtype = query_embedding.dtype
num_channels = self.config.num_channels
def construct_input(query_embedding, template_aatype,
template_all_atom_positions, template_all_atom_mask,
multichain_mask_2d):
# Compute distogram feature for the template.
template_positions, pseudo_beta_mask = modules.pseudo_beta_fn(
template_aatype, template_all_atom_positions, template_all_atom_mask)
pseudo_beta_mask_2d = (pseudo_beta_mask[:, None] *
pseudo_beta_mask[None, :])
pseudo_beta_mask_2d *= multichain_mask_2d
template_dgram = modules.dgram_from_positions(
template_positions, **self.config.dgram_features)
template_dgram *= pseudo_beta_mask_2d[..., None]
template_dgram = template_dgram.astype(dtype)
pseudo_beta_mask_2d = pseudo_beta_mask_2d.astype(dtype)
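      # Each entry below is a (feature, n_input_dims) pair; n_input_dims tells
      # the Linear embedding at the end of this function how many trailing axes
      # of the feature to project (0 for scalar per-pair masks, 1 for per-pair
      # feature vectors).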
to_concat = [(template_dgram, 1), (pseudo_beta_mask_2d, 0)]
aatype = jax.nn.one_hot(template_aatype, 22, axis=-1, dtype=dtype)
to_concat.append((aatype[None, :, :], 1))
to_concat.append((aatype[:, None, :], 1))
      # Compute a feature representing the normalized vector between each
      # pair of backbone frames, i.e. the direction of every other residue
      # expressed in each residue's local frame.
raw_atom_pos = template_all_atom_positions
atom_pos = geometry.Vec3Array.from_array(raw_atom_pos)
rigid, backbone_mask = folding_multimer.make_backbone_affine(
atom_pos,
template_all_atom_mask,
template_aatype)
points = rigid.translation
rigid_vec = rigid[:, None].inverse().apply_to_point(points)
unit_vector = rigid_vec.normalized()
unit_vector = [unit_vector.x, unit_vector.y, unit_vector.z]
backbone_mask_2d = backbone_mask[:, None] * backbone_mask[None, :]
backbone_mask_2d *= multichain_mask_2d
unit_vector = [x*backbone_mask_2d for x in unit_vector]
      # Note that the backbone_mask takes into account C, CA and N (unlike the
      # pseudo-beta mask, which only needs CB), so we add both masks as features.
to_concat.extend([(x, 0) for x in unit_vector])
to_concat.append((backbone_mask_2d, 0))
query_embedding = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='query_embedding_norm')(
query_embedding)
      # Allow the template embedder to see the query embedding. Note this
      # contains the relative position feature, so this is how the network
      # knows which residues are adjacent to each other.
to_concat.append((query_embedding, 1))
act = 0
for i, (x, n_input_dims) in enumerate(to_concat):
act += common_modules.Linear(
num_channels,
num_input_dims=n_input_dims,
initializer='relu',
name=f'template_pair_embedding_{i}')(x)
return act
act = construct_input(query_embedding, template_aatype,
template_all_atom_positions, template_all_atom_mask,
multichain_mask_2d)
template_iteration = TemplateEmbeddingIteration(
c.template_pair_stack, gc, name='template_embedding_iteration')
def template_iteration_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
act = template_iteration(
act=act,
pair_mask=padding_mask_2d,
is_training=is_training,
safe_key=safe_subkey)
return (act, safe_key)
if gc.use_remat:
template_iteration_fn = hk.remat(template_iteration_fn)
safe_key, safe_subkey = safe_key.split()
template_stack = layer_stack.layer_stack(
c.template_pair_stack.num_block)(
template_iteration_fn)
act, safe_key = template_stack((act, safe_subkey))
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='output_layer_norm')(
act)
return act
class TemplateEmbeddingIteration(hk.Module):
"""Single Iteration of Template Embedding."""
def __init__(self, config, global_config,
name='template_embedding_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, act, pair_mask, is_training=True,
safe_key=None):
"""Build a single iteration of the template embedder.
Args:
act: [num_res, num_res, num_channel] Input pairwise activations.
pair_mask: [num_res, num_res] padding mask.
is_training: Whether to run in training mode.
safe_key: Safe pseudo-random generator key.
Returns:
[num_res, num_res, num_channel] tensor of activations.
"""
c = self.config
gc = self.global_config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
dropout_wrapper_fn = functools.partial(
modules.dropout_wrapper,
is_training=is_training,
global_config=gc)
safe_key, *sub_keys = safe_key.split(20)
sub_keys = iter(sub_keys)
act = dropout_wrapper_fn(
modules.TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
name='triangle_multiplication_outgoing'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.TriangleMultiplication(c.triangle_multiplication_incoming, gc,
name='triangle_multiplication_incoming'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.TriangleAttention(c.triangle_attention_starting_node, gc,
name='triangle_attention_starting_node'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.TriangleAttention(c.triangle_attention_ending_node, gc,
name='triangle_attention_ending_node'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.Transition(c.pair_transition, gc,
name='pair_transition'),
act,
pair_mask,
safe_key=next(sub_keys))
return act
def template_embedding_1d(batch, num_channel):
  """Embed templates into a (num_templates, num_res, num_channels) embedding.
Args:
batch: A batch containing:
template_aatype, (num_templates, num_res) aatype for the templates.
template_all_atom_positions, (num_templates, num_residues, 37, 3) atom
positions for the templates.
template_all_atom_mask, (num_templates, num_residues, 37) atom mask for
each template.
num_channel: The number of channels in the output.
Returns:
An embedding of shape (num_templates, num_res, num_channels) and a mask of
shape (num_templates, num_res).
"""
# Embed the templates aatypes.
aatype_one_hot = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1)
num_templates = batch['template_aatype'].shape[0]
all_chi_angles = []
all_chi_masks = []
for i in range(num_templates):
atom_pos = geometry.Vec3Array.from_array(
batch['template_all_atom_positions'][i, :, :, :])
template_chi_angles, template_chi_mask = all_atom_multimer.compute_chi_angles(
atom_pos,
batch['template_all_atom_mask'][i, :, :],
batch['template_aatype'][i, :])
all_chi_angles.append(template_chi_angles)
all_chi_masks.append(template_chi_mask)
chi_angles = jnp.stack(all_chi_angles, axis=0)
chi_mask = jnp.stack(all_chi_masks, axis=0)
template_features = jnp.concatenate([
aatype_one_hot,
jnp.sin(chi_angles) * chi_mask,
jnp.cos(chi_angles) * chi_mask,
chi_mask], axis=-1)
template_mask = chi_mask[:, :, 0]
template_activations = common_modules.Linear(
num_channel,
initializer='relu',
name='template_single_embedding')(
template_features)
template_activations = jax.nn.relu(template_activations)
template_activations = common_modules.Linear(
num_channel,
initializer='relu',
name='template_projection')(
template_activations)
return template_activations, template_mask
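# Illustrative sketch (not part of the original commit): template_embedding_1d
# creates Haiku parameters, so it has to run inside an hk.transform-ed
# function. The batch keys follow the docstring above; num_channel=64 is an
# arbitrary choice for illustration.
def _example_template_embedding_1d(batch, rng):
  forward = hk.transform(
      lambda b: template_embedding_1d(b, num_channel=64))
  params = forward.init(rng, batch)
  # Returns (num_templates, num_res, 64) activations and the per-residue mask.
  return forward.apply(params, rng, batch)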
...@@ -15,6 +15,7 @@
"""A collection of JAX utility functions for use in protein folding."""
import collections
import functools
import numbers
from typing import Mapping
...@@ -79,3 +80,52 @@ def flat_params_to_haiku(params: Mapping[str, np.ndarray]) -> hk.Params:
    hk_params[scope][name] = jnp.array(array)
  return hk_params
def padding_consistent_rng(f):
"""Modify any element-wise random function to be consistent with padding.
  Normally if you take a function like jax.random.normal and generate an array,
  say of size (10,10), you will get a different set of random numbers than if
  you add padding and take the first (10,10) sub-array.
This function makes a random function that is consistent regardless of the
amount of padding added.
Note: The padding-consistent function is likely to be slower to compile and
run than the function it is wrapping, but these slowdowns are likely to be
negligible in a large network.
Args:
f: Any element-wise function that takes (PRNG key, shape) as the first 2
arguments.
Returns:
An equivalent function to f, that is now consistent for different amounts of
padding.
"""
def grid_keys(key, shape):
"""Generate a grid of rng keys that is consistent with different padding.
Generate random keys such that the keys will be identical, regardless of
how much padding is added to any dimension.
Args:
key: A PRNG key.
shape: The shape of the output array of keys that will be generated.
Returns:
An array of shape `shape` consisting of random keys.
"""
if not shape:
return key
new_keys = jax.vmap(functools.partial(jax.random.fold_in, key))(
jnp.arange(shape[0]))
return jax.vmap(functools.partial(grid_keys, shape=shape[1:]))(new_keys)
def inner(key, shape, **kwargs):
return jnp.vectorize(
lambda key: f(key, shape=(), **kwargs),
signature='(2)->()')(
grid_keys(key, shape))
return inner
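# Illustrative sketch (not part of the original commit): a quick check that a
# sampler wrapped with padding_consistent_rng is unaffected by padding. The
# overlapping (10, 10) block is identical whatever the requested shape.
def _check_padding_consistency():
  consistent_normal = padding_consistent_rng(jax.random.normal)
  key = jax.random.PRNGKey(0)
  small = consistent_normal(key, shape=(10, 10))
  padded = consistent_normal(key, shape=(12, 15))
  # The unpadded region is identical regardless of the extra padding.
  assert jnp.allclose(small, padded[:10, :10])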
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AlphaFold Colab notebook."""
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for the AlphaFold Colab notebook."""
import enum
import json
from typing import Any, Mapping, Optional, Sequence, Tuple
from alphafold.common import residue_constants
from alphafold.data import parsers
from matplotlib import pyplot as plt
import numpy as np
@enum.unique
class ModelType(enum.Enum):
MONOMER = 0
MULTIMER = 1
def clean_and_validate_sequence(
input_sequence: str, min_length: int, max_length: int) -> str:
"""Checks that the input sequence is ok and returns a clean version of it."""
# Remove all whitespaces, tabs and end lines; upper-case.
clean_sequence = input_sequence.translate(
str.maketrans('', '', ' \n\t')).upper()
aatypes = set(residue_constants.restypes) # 20 standard aatypes.
if not set(clean_sequence).issubset(aatypes):
raise ValueError(
f'Input sequence contains non-amino acid letters: '
f'{set(clean_sequence) - aatypes}. AlphaFold only supports 20 standard '
'amino acids as inputs.')
if len(clean_sequence) < min_length:
raise ValueError(
f'Input sequence is too short: {len(clean_sequence)} amino acids, '
f'while the minimum is {min_length}')
if len(clean_sequence) > max_length:
raise ValueError(
f'Input sequence is too long: {len(clean_sequence)} amino acids, while '
f'the maximum is {max_length}. You may be able to run it with the full '
f'AlphaFold system depending on your resources (system memory, '
f'GPU memory).')
return clean_sequence
def validate_input(
input_sequences: Sequence[str],
min_length: int,
max_length: int,
max_multimer_length: int) -> Tuple[Sequence[str], ModelType]:
"""Validates and cleans input sequences and determines which model to use."""
sequences = []
for input_sequence in input_sequences:
if input_sequence.strip():
input_sequence = clean_and_validate_sequence(
input_sequence=input_sequence,
min_length=min_length,
max_length=max_length)
sequences.append(input_sequence)
if len(sequences) == 1:
print('Using the single-chain model.')
return sequences, ModelType.MONOMER
elif len(sequences) > 1:
total_multimer_length = sum([len(seq) for seq in sequences])
if total_multimer_length > max_multimer_length:
raise ValueError(f'The total length of multimer sequences is too long: '
f'{total_multimer_length}, while the maximum is '
f'{max_multimer_length}. Please use the full AlphaFold '
f'system for long multimers.')
elif total_multimer_length > 1536:
print('WARNING: The accuracy of the system has not been fully validated '
'above 1536 residues, and you may experience long running times or '
f'run out of memory for your complex with {total_multimer_length} '
'residues.')
print(f'Using the multimer model with {len(sequences)} sequences.')
return sequences, ModelType.MULTIMER
else:
raise ValueError('No input amino acid sequence provided, please provide at '
'least one sequence.')
def merge_chunked_msa(
results: Sequence[Mapping[str, Any]],
max_hits: Optional[int] = None
) -> parsers.Msa:
"""Merges chunked database hits together into hits for the full database."""
unsorted_results = []
for chunk_index, chunk in enumerate(results):
msa = parsers.parse_stockholm(chunk['sto'])
e_values_dict = parsers.parse_e_values_from_tblout(chunk['tbl'])
# Jackhmmer lists sequences as <sequence name>/<residue from>-<residue to>.
e_values = [e_values_dict[t.partition('/')[0]] for t in msa.descriptions]
chunk_results = zip(
msa.sequences, msa.deletion_matrix, msa.descriptions, e_values)
if chunk_index != 0:
next(chunk_results) # Only take query (first hit) from the first chunk.
unsorted_results.extend(chunk_results)
sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[-1])
merged_sequences, merged_deletion_matrix, merged_descriptions, _ = zip(
*sorted_by_evalue)
merged_msa = parsers.Msa(sequences=merged_sequences,
deletion_matrix=merged_deletion_matrix,
descriptions=merged_descriptions)
if max_hits is not None:
merged_msa = merged_msa.truncate(max_seqs=max_hits)
return merged_msa
def show_msa_info(
single_chain_msas: Sequence[parsers.Msa],
sequence_index: int):
"""Prints info and shows a plot of the deduplicated single chain MSA."""
full_single_chain_msa = []
for single_chain_msa in single_chain_msas:
full_single_chain_msa.extend(single_chain_msa.sequences)
# Deduplicate but preserve order (hence can't use set).
deduped_full_single_chain_msa = list(dict.fromkeys(full_single_chain_msa))
total_msa_size = len(deduped_full_single_chain_msa)
print(f'\n{total_msa_size} unique sequences found in total for sequence '
f'{sequence_index}\n')
aa_map = {res: i for i, res in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')}
msa_arr = np.array(
[[aa_map[aa] for aa in seq] for seq in deduped_full_single_chain_msa])
plt.figure(figsize=(12, 3))
plt.title(f'Per-Residue Count of Non-Gap Amino Acids in the MSA for Sequence '
f'{sequence_index}')
plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), color='black')
plt.ylabel('Non-Gap Count')
plt.yticks(range(0, total_msa_size + 1, max(1, int(total_msa_size / 3))))
plt.show()
def empty_placeholder_template_features(
num_templates: int, num_res: int) -> Mapping[str, np.ndarray]:
  """Returns all-zero placeholder template features for the given shapes."""
  return {
'template_aatype': np.zeros(
(num_templates, num_res,
len(residue_constants.restypes_with_x_and_gap)), dtype=np.float32),
'template_all_atom_masks': np.zeros(
(num_templates, num_res, residue_constants.atom_type_num),
dtype=np.float32),
'template_all_atom_positions': np.zeros(
(num_templates, num_res, residue_constants.atom_type_num, 3),
dtype=np.float32),
'template_domain_names': np.zeros([num_templates], dtype=np.object),
'template_sequence': np.zeros([num_templates], dtype=np.object),
'template_sum_probs': np.zeros([num_templates], dtype=np.float32),
}
def get_pae_json(pae: np.ndarray, max_pae: float) -> str:
"""Returns the PAE in the same format as is used in the AFDB."""
rounded_errors = np.round(pae.astype(np.float64), decimals=1)
indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1
indices_1 = indices[0].flatten().tolist()
indices_2 = indices[1].flatten().tolist()
return json.dumps(
[{'residue1': indices_1,
'residue2': indices_2,
'distance': rounded_errors.flatten().tolist(),
'max_predicted_aligned_error': max_pae}],
indent=None, separators=(',', ':'))
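# Illustrative sketch (not part of the original commit): the JSON produced by
# get_pae_json uses 1-based residue indices and row-major flattening.
def _example_pae_json():
  pae = np.array([[0.0, 4.2], [3.9, 0.0]])
  parsed = json.loads(get_pae_json(pae=pae, max_pae=31.75))[0]
  assert parsed['residue1'] == [1, 1, 2, 2]
  assert parsed['residue2'] == [1, 2, 1, 2]
  assert parsed['max_predicted_aligned_error'] == 31.75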
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for notebook_utils."""
import io
from absl.testing import absltest
from absl.testing import parameterized
from alphafold.data import parsers
from alphafold.data import templates
from alphafold.notebooks import notebook_utils
import mock
import numpy as np
ONLY_QUERY_HIT = {
'sto': (
'# STOCKHOLM 1.0\n'
'#=GF ID query-i1\n'
'query MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEH\n'
'//\n'),
'tbl': '',
'stderr': b'',
'n_iter': 1,
'e_value': 0.0001}
# pylint: disable=line-too-long
MULTI_SEQUENCE_HIT_1 = {
'sto': (
'# STOCKHOLM 1.0\n'
'#=GF ID query-i1\n'
'#=GS ERR1700680_4602609/41-109 DE [subseq from] ERR1700680_4602609\n'
'#=GS ERR1019366_5760491/40-105 DE [subseq from] ERR1019366_5760491\n'
'#=GS SRR5580704_12853319/61-125 DE [subseq from] SRR5580704_12853319\n'
'query MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAPKPH\n'
'ERR1700680_4602609/41-109 --INKGAEYHKKAAEHHELAAKHHREAAKHHEAGSHEKAAHHSEIAAGHGLTAVHHTEEATK-HHPEEHTEK--\n'
'ERR1019366_5760491/40-105 ---RSGAQHHDAAAQHYEEAARHHRMAAKQYQASHHEKAAHYAQLAYAHHMYAEQHAAEAAK-AHAKNHG----\n'
'SRR5580704_12853319/61-125 ----PAADHHMKAAEHHEEAAKHHRAAAEHHTAGDHQKAGHHAHVANGHHVNAVHHAEEASK-HHATDHS----\n'
'//\n'),
'tbl': (
'ERR1700680_4602609 - query - 7.7e-09 47.7 33.8 1.1e-08 47.2 33.8 1.2 1 0 0 1 1 1 1 -\n'
'ERR1019366_5760491 - query - 1.7e-08 46.6 33.1 2.5e-08 46.1 33.1 1.3 1 0 0 1 1 1 1 -\n'
'SRR5580704_12853319 - query - 1.1e-07 44.0 41.6 2e-07 43.1 41.6 1.4 1 0 0 1 1 1 1 -\n'),
'stderr': b'',
'n_iter': 1,
'e_value': 0.0001}
MULTI_SEQUENCE_HIT_2 = {
'sto': (
'# STOCKHOLM 1.0\n'
'#=GF ID query-i1\n'
'#=GS ERR1700719_3476944/70-137 DE [subseq from] ERR1700719_3476944\n'
'#=GS ERR1700761_4254522/72-138 DE [subseq from] ERR1700761_4254522\n'
'#=GS SRR5438477_9761204/64-132 DE [subseq from] SRR5438477_9761204\n'
'query MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAPKPH\n'
'ERR1700719_3476944/70-137 ---KQAAEHHHQAAEHHEHAARHHREAAKHHEAGDHESAAHHAHTAQGHLHQATHHASEAAKLHVEHHGQK--\n'
'ERR1700761_4254522/72-138 ----QASEHHNLAAEHHEHAARHHRDAAKHHKAGDHEKAAHHAHVAHGHHLHATHHATEAAKHHVEAHGEK--\n'
'SRR5438477_9761204/64-132 MPKHEGAEHHKKAAEHNEHAARHHKEAARHHEEGSHEKVGHHAHIAHGHHLHATHHAEEAAKTHSNQHE----\n'
'//\n'),
'tbl': (
'ERR1700719_3476944 - query - 2e-07 43.2 47.5 3.5e-07 42.4 47.5 1.4 1 0 0 1 1 1 1 -\n'
'ERR1700761_4254522 - query - 6.1e-07 41.6 48.1 8.1e-07 41.3 48.1 1.2 1 0 0 1 1 1 1 -\n'
'SRR5438477_9761204 - query - 1.8e-06 40.2 46.9 2.3e-06 39.8 46.9 1.2 1 0 0 1 1 1 1 -\n'),
'stderr': b'',
'n_iter': 1,
'e_value': 0.0001}
# pylint: enable=line-too-long
class NotebookUtilsTest(parameterized.TestCase):
@parameterized.parameters(
('DeepMind', 'DEEPMIND'), ('A ', 'A'), ('\tA', 'A'), (' A\t\n', 'A'),
('ACDEFGHIKLMNPQRSTVWY', 'ACDEFGHIKLMNPQRSTVWY'))
def test_clean_and_validate_sequence_ok(self, sequence, exp_clean):
clean = notebook_utils.clean_and_validate_sequence(
sequence, min_length=1, max_length=100)
self.assertEqual(clean, exp_clean)
@parameterized.named_parameters(
('too_short', 'AA', 'too short'),
('too_long', 'AAAAAAAAAA', 'too long'),
('bad_amino_acids_B', 'BBBB', 'non-amino acid'),
('bad_amino_acids_J', 'JJJJ', 'non-amino acid'),
('bad_amino_acids_O', 'OOOO', 'non-amino acid'),
('bad_amino_acids_U', 'UUUU', 'non-amino acid'),
('bad_amino_acids_X', 'XXXX', 'non-amino acid'),
('bad_amino_acids_Z', 'ZZZZ', 'non-amino acid'))
def test_clean_and_validate_sequence_bad(self, sequence, exp_error):
with self.assertRaisesRegex(ValueError, f'.*{exp_error}.*'):
notebook_utils.clean_and_validate_sequence(
sequence, min_length=4, max_length=8)
@parameterized.parameters(
(['A', '', '', ' ', '\t', ' \t\n', '', ''], ['A'],
notebook_utils.ModelType.MONOMER),
(['', 'A'], ['A'],
notebook_utils.ModelType.MONOMER),
(['A', 'C ', ''], ['A', 'C'],
notebook_utils.ModelType.MULTIMER),
(['', 'A', '', 'C '], ['A', 'C'],
notebook_utils.ModelType.MULTIMER))
def test_validate_input_ok(
self, input_sequences, exp_sequences, exp_model_type):
sequences, model_type = notebook_utils.validate_input(
input_sequences=input_sequences,
min_length=1, max_length=100, max_multimer_length=100)
self.assertSequenceEqual(sequences, exp_sequences)
self.assertEqual(model_type, exp_model_type)
@parameterized.named_parameters(
('no_input_sequence', ['', '\t', '\n'], 'No input amino acid sequence'),
('too_long_single', ['AAAAAAAAA', 'AAAA'], 'Input sequence is too long'),
('too_long_multimer', ['AAAA', 'AAAAA'], 'The total length of multimer'))
def test_validate_input_bad(self, input_sequences, exp_error):
with self.assertRaisesRegex(ValueError, f'.*{exp_error}.*'):
notebook_utils.validate_input(
input_sequences=input_sequences,
min_length=4, max_length=8, max_multimer_length=6)
def test_merge_chunked_msa_no_hits(self):
results = [ONLY_QUERY_HIT, ONLY_QUERY_HIT]
merged_msa = notebook_utils.merge_chunked_msa(
results=results)
self.assertSequenceEqual(
merged_msa.sequences,
('MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEH',))
self.assertSequenceEqual(merged_msa.deletion_matrix, ([0] * 56,))
def test_merge_chunked_msa(self):
results = [MULTI_SEQUENCE_HIT_1, MULTI_SEQUENCE_HIT_2]
merged_msa = notebook_utils.merge_chunked_msa(
results=results)
self.assertLen(merged_msa.sequences, 7)
# The 1st one is the query.
self.assertEqual(
merged_msa.sequences[0],
'MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAP'
'KPH')
# The 2nd one is the one with the lowest e-value: ERR1700680_4602609.
self.assertEqual(
merged_msa.sequences[1],
'--INKGAEYHKKAAEHHELAAKHHREAAKHHEAGSHEKAAHHSEIAAGHGLTAVHHTEEATK-HHPEEHT'
'EK-')
# The last one is the one with the largest e-value: SRR5438477_9761204.
self.assertEqual(
merged_msa.sequences[-1],
'MPKHEGAEHHKKAAEHNEHAARHHKEAARHHEEGSHEKVGHHAHIAHGHHLHATHHAEEAAKTHSNQHE-'
'---')
self.assertLen(merged_msa.deletion_matrix, 7)
@mock.patch('sys.stdout', new_callable=io.StringIO)
def test_show_msa_info(self, mocked_stdout):
single_chain_msas = [
parsers.Msa(sequences=['A', 'B', 'C', 'C'],
deletion_matrix=[None] * 4,
descriptions=[''] * 4),
parsers.Msa(sequences=['A', 'A', 'A', 'D'],
deletion_matrix=[None] * 4,
descriptions=[''] * 4)
]
notebook_utils.show_msa_info(
single_chain_msas=single_chain_msas, sequence_index=1)
self.assertEqual(mocked_stdout.getvalue(),
'\n4 unique sequences found in total for sequence 1\n\n')
@parameterized.named_parameters(
('some_templates', 4), ('no_templates', 0))
def test_empty_placeholder_template_features(self, num_templates):
template_features = notebook_utils.empty_placeholder_template_features(
num_templates=num_templates, num_res=16)
self.assertCountEqual(template_features.keys(),
templates.TEMPLATE_FEATURES.keys())
self.assertSameElements(
[v.shape[0] for v in template_features.values()], [num_templates])
self.assertSequenceEqual(
[t.dtype for t in template_features.values()],
[np.array([], dtype=templates.TEMPLATE_FEATURES[feat_name]).dtype
for feat_name in template_features])
def test_get_pae_json(self):
pae = np.array([[0.01, 13.12345], [20.0987, 0.0]])
pae_json = notebook_utils.get_pae_json(pae=pae, max_pae=31.75)
self.assertEqual(
pae_json,
'[{"residue1":[1,1,2,2],"residue2":[1,2,1,2],"distance":'
'[0.0,13.1,20.1,0.0],"max_predicted_aligned_error":31.75}]')
if __name__ == '__main__':
absltest.main()
...@@ -15,6 +15,7 @@
"""Docker launch script for Alphafold docker image."""
import os
import pathlib
import signal
from typing import Tuple
...@@ -25,87 +26,54 @@ import docker
from docker import types
flags.DEFINE_bool(
    'use_gpu', True, 'Enable NVIDIA runtime to run with GPUs.')
flags.DEFINE_string(
    'gpu_devices', 'all',
    'Comma separated list of devices to pass to NVIDIA_VISIBLE_DEVICES.')
flags.DEFINE_list(
    'fasta_paths', None,
    'Paths to FASTA files, each containing one sequence. Paths should be '
    'separated by commas. All FASTA paths must have a unique basename as the '
    'basename is used to name the output directories for each prediction.')
flags.DEFINE_list('is_prokaryote_list', None, 'Optional for multimer system, '
                  'not used by the single chain system. '
                  'This list should contain a boolean for each fasta '
                  'specifying true where the target complex is from a '
                  'prokaryote, and false where it is not, or where the '
                  'origin is unknown. These values determine the pairing '
                  'method for the MSA.')
flags.DEFINE_string(
    'output_dir', '/tmp/alphafold',
    'Path to a directory that will store the results.')
flags.DEFINE_string(
    'data_dir', None,
    'Path to directory with supporting data: AlphaFold parameters and genetic '
    'and template databases. Set to the target of download_all_databases.sh.')
flags.DEFINE_string(
    'docker_image_name', 'alphafold', 'Name of the AlphaFold Docker image.')
flags.DEFINE_string(
    'max_template_date', None,
    'Maximum template release date to consider (ISO-8601 format: YYYY-MM-DD). '
    'Important if folding historical test sets.')
flags.DEFINE_enum(
    'db_preset', 'full_dbs', ['full_dbs', 'reduced_dbs'],
    'Choose preset MSA database configuration - smaller genetic database '
    'config (reduced_dbs) or full genetic database config (full_dbs)')
flags.DEFINE_enum(
    'model_preset', 'monomer',
    ['monomer', 'monomer_casp14', 'monomer_ptm', 'multimer'],
    'Choose preset model configuration - the monomer model, the monomer model '
    'with extra ensembling, monomer model with pTM head, or multimer model')
flags.DEFINE_boolean(
    'benchmark', False,
    'Run multiple JAX model evaluations to obtain a timing that excludes the '
    'compilation time, which should be more indicative of the time required '
    'for inferencing many proteins.')
flags.DEFINE_boolean(
    'use_precomputed_msas', False,
    'Whether to read MSAs that have been written to disk. WARNING: This will '
    'not check if the sequence, database or configuration have changed.')
FLAGS = flags.FLAGS
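# Illustrative only (hypothetical paths): a typical invocation of this script.
#   python3 docker/run_docker.py \
#     --fasta_paths=/tmp/T1050.fasta \
#     --max_template_date=2020-05-14 \
#     --model_preset=monomer \
#     --data_dir=/data/alphafold \
#     --output_dir=/tmp/alphafold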
...@@ -125,6 +93,55 @@ def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
# You can individually override the following paths if you have placed the
# data in locations other than the FLAGS.data_dir.
# Path to the Uniref90 database for use by JackHMMER.
uniref90_database_path = os.path.join(
FLAGS.data_dir, 'uniref90', 'uniref90.fasta')
# Path to the Uniprot database for use by JackHMMER.
uniprot_database_path = os.path.join(
FLAGS.data_dir, 'uniprot', 'uniprot.fasta')
# Path to the MGnify database for use by JackHMMER.
mgnify_database_path = os.path.join(
FLAGS.data_dir, 'mgnify', 'mgy_clusters_2018_12.fa')
# Path to the BFD database for use by HHblits.
bfd_database_path = os.path.join(
FLAGS.data_dir, 'bfd',
'bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt')
# Path to the Small BFD database for use by JackHMMER.
small_bfd_database_path = os.path.join(
FLAGS.data_dir, 'small_bfd', 'bfd-first_non_consensus_sequences.fasta')
# Path to the Uniclust30 database for use by HHblits.
uniclust30_database_path = os.path.join(
FLAGS.data_dir, 'uniclust30', 'uniclust30_2018_08', 'uniclust30_2018_08')
# Path to the PDB70 database for use by HHsearch.
pdb70_database_path = os.path.join(FLAGS.data_dir, 'pdb70', 'pdb70')
# Path to the PDB seqres database for use by hmmsearch.
pdb_seqres_database_path = os.path.join(
FLAGS.data_dir, 'pdb_seqres', 'pdb_seqres.txt')
# Path to a directory with template mmCIF structures, each named <pdb_id>.cif.
template_mmcif_dir = os.path.join(FLAGS.data_dir, 'pdb_mmcif', 'mmcif_files')
# Path to a file mapping obsolete PDB IDs to their replacements.
obsolete_pdbs_path = os.path.join(FLAGS.data_dir, 'pdb_mmcif', 'obsolete.dat')
alphafold_path = pathlib.Path(__file__).parent.parent
data_dir_path = pathlib.Path(FLAGS.data_dir)
if alphafold_path == data_dir_path or alphafold_path in data_dir_path.parents:
raise app.UsageError(
f'The download directory {FLAGS.data_dir} should not be a subdirectory '
f'in the AlphaFold repository directory. If it is, the Docker build is '
f'slow since the large databases are copied during the image creation.')
  mounts = []
  command_args = []
...@@ -139,12 +156,19 @@ def main(argv):
  database_paths = [
      ('uniref90_database_path', uniref90_database_path),
      ('mgnify_database_path', mgnify_database_path),
      ('data_dir', FLAGS.data_dir),
      ('template_mmcif_dir', template_mmcif_dir),
      ('obsolete_pdbs_path', obsolete_pdbs_path),
  ]
  if FLAGS.model_preset == 'multimer':
    database_paths.append(('uniprot_database_path', uniprot_database_path))
    database_paths.append(('pdb_seqres_database_path',
                           pdb_seqres_database_path))
  else:
    database_paths.append(('pdb70_database_path', pdb70_database_path))
  if FLAGS.db_preset == 'reduced_dbs':
    database_paths.append(('small_bfd_database_path', small_bfd_database_path))
  else:
    database_paths.extend([
...@@ -158,20 +182,25 @@
    command_args.append(f'--{name}={target_path}')
  output_target_path = os.path.join(_ROOT_MOUNT_DIRECTORY, 'output')
  mounts.append(types.Mount(output_target_path, FLAGS.output_dir, type='bind'))
  command_args.extend([
      f'--output_dir={output_target_path}',
      f'--max_template_date={FLAGS.max_template_date}',
      f'--db_preset={FLAGS.db_preset}',
      f'--model_preset={FLAGS.model_preset}',
      f'--benchmark={FLAGS.benchmark}',
      f'--use_precomputed_msas={FLAGS.use_precomputed_msas}',
      '--logtostderr',
  ])
  if FLAGS.is_prokaryote_list:
    command_args.append(
        f'--is_prokaryote_list={",".join(FLAGS.is_prokaryote_list)}')
  client = docker.from_env()
  container = client.containers.run(
      image=FLAGS.docker_image_name,
      command=command_args,
      runtime='nvidia' if FLAGS.use_gpu else None,
      remove=True,
...@@ -195,6 +224,7 @@
if __name__ == '__main__':
  flags.mark_flags_as_required([
      'data_dir',
      'fasta_paths',
      'max_template_date',
  ])
...
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "pc5-mbsX9PZC"
   },
   "source": [
    "# AlphaFold Colab\n",
    "\n",
    "This Colab notebook allows you to easily predict the structure of a protein using a slightly simplified version of [AlphaFold v2.1.0](https://doi.org/10.1038/s41586-021-03819-2). \n",
    "\n",
    "**Differences to AlphaFold v2.1.0**\n",
    "\n",
    "In comparison to AlphaFold v2.1.0, this Colab notebook uses **no templates (homologous structures)** and a selected portion of the [BFD database](https://bfd.mmseqs.com/). We have validated these changes on several thousand recent PDB structures. While accuracy will be near-identical to the full AlphaFold system on many targets, a small fraction have a large drop in accuracy due to the smaller MSA and lack of templates. For best reliability, we recommend instead using the [full open source AlphaFold](https://github.com/deepmind/alphafold/), or the [AlphaFold Protein Structure Database](https://alphafold.ebi.ac.uk/).\n",
    "\n",
    "**This Colab has a small drop in average accuracy for multimers compared to a local AlphaFold installation; for full multimer accuracy it is highly recommended to run [AlphaFold locally](https://github.com/deepmind/alphafold#running-alphafold).** Moreover, AlphaFold-Multimer requires an MSA search for every unique sequence in the complex, so it is substantially slower. If your notebook times out due to the slow multimer MSA search, we recommend either using Colab Pro or running AlphaFold locally.\n",
    "\n",
    "Please note that this Colab notebook is provided as an early-access prototype and is not a finished product. It is provided for theoretical modelling only and caution should be exercised in its use. \n",
    "\n",
    "**Citing this work**\n",
    "\n",
    "Any publication that discloses findings arising from using this notebook should [cite](https://github.com/deepmind/alphafold/#citing-this-work) the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2).\n",
    "\n",
    "**Licenses**\n",
    "\n",
    "This Colab uses the [AlphaFold model parameters](https://github.com/deepmind/alphafold/#model-parameters-license) and its outputs are thus for non-commercial use only, under the Creative Commons Attribution-NonCommercial 4.0 International ([CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/legalcode)) license. The Colab itself is provided under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). See the full license statement below.\n",
    "\n",
    "**More information**\n",
    "\n",
    "You can find more information about how AlphaFold works in the following papers:\n",
    "\n",
    "* [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2)\n",
    "* [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1)\n",
    "* [AlphaFold-Multimer paper](https://www.biorxiv.org/content/10.1101/2021.10.04.463034v1)\n",
    "\n",
    "The FAQ on how to interpret AlphaFold predictions is [here](https://alphafold.ebi.ac.uk/faq)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "woIxeCPygt7K"
},
"outputs": [],
"source": [
"#@title Install third-party software\n",
"\n",
"#@markdown Please execute this cell by pressing the _Play_ button \n",
"#@markdown on the left to download and import third-party software \n",
"#@markdown in this Colab notebook. (See the [acknowledgements](https://github.com/deepmind/alphafold/#acknowledgements) in our readme.)\n",
"\n",
"#@markdown **Note**: This installs the software on the Colab \n",
"#@markdown notebook in the cloud and not on your computer.\n",
"\n",
"from IPython.utils import io\n",
"import os\n",
"import subprocess\n",
"import tqdm.notebook\n",
"\n",
"TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'\n",
"\n",
"try:\n",
" with tqdm.notebook.tqdm(total=100, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" with io.capture_output() as captured:\n",
" # Uninstall default Colab version of TF.\n",
" %shell pip uninstall -y tensorflow\n",
"\n",
" %shell sudo apt install --quiet --yes hmmer\n",
" pbar.update(6)\n",
"\n",
" # Install py3dmol.\n",
" %shell pip install py3dmol\n",
" pbar.update(2)\n",
"\n",
" # Install OpenMM and pdbfixer.\n",
" %shell rm -rf /opt/conda\n",
" %shell wget -q -P /tmp \\\n",
" https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \\\n",
" \u0026\u0026 bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \\\n",
" \u0026\u0026 rm /tmp/Miniconda3-latest-Linux-x86_64.sh\n",
" pbar.update(9)\n",
"\n",
" PATH=%env PATH\n",
" %env PATH=/opt/conda/bin:{PATH}\n",
" %shell conda update -qy conda \\\n",
" \u0026\u0026 conda install -qy -c conda-forge \\\n",
" python=3.7 \\\n",
" openmm=7.5.1 \\\n",
" pdbfixer\n",
" pbar.update(80)\n",
"\n",
" # Create a ramdisk to store a database chunk to make Jackhmmer run fast.\n",
" %shell sudo mkdir -m 777 --parents /tmp/ramdisk\n",
" %shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk\n",
" pbar.update(2)\n",
"\n",
" %shell wget -q -P /content \\\n",
" https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt\n",
" pbar.update(1)\n",
"except subprocess.CalledProcessError:\n",
" print(captured)\n",
" raise"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "VzJ5iMjTtoZw"
},
"outputs": [],
"source": [
"#@title Download AlphaFold\n",
"\n",
"#@markdown Please execute this cell by pressing the *Play* button on \n",
"#@markdown the left.\n",
"\n",
"GIT_REPO = 'https://github.com/deepmind/alphafold'\n",
"\n",
"SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_colab_2021-10-27.tar'\n",
"PARAMS_DIR = './alphafold/data/params'\n",
"PARAMS_PATH = os.path.join(PARAMS_DIR, os.path.basename(SOURCE_URL))\n",
"\n",
"try:\n",
" with tqdm.notebook.tqdm(total=100, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" with io.capture_output() as captured:\n",
" %shell rm -rf alphafold\n",
" %shell git clone --branch main {GIT_REPO} alphafold\n",
" pbar.update(8)\n",
" # Install the required versions of all dependencies.\n",
" %shell pip3 install -r ./alphafold/requirements.txt\n",
" # Run setup.py to install only AlphaFold.\n",
" %shell pip3 install --no-dependencies ./alphafold\n",
" pbar.update(10)\n",
"\n",
" # Apply OpenMM patch.\n",
" %shell pushd /opt/conda/lib/python3.7/site-packages/ \u0026\u0026 \\\n",
" patch -p0 \u003c /content/alphafold/docker/openmm.patch \u0026\u0026 \\\n",
" popd\n",
"\n",
" # Make sure stereo_chemical_props.txt is in all locations where it could be searched for.\n",
" %shell mkdir -p /content/alphafold/alphafold/common\n",
" %shell cp -f /content/stereo_chemical_props.txt /content/alphafold/alphafold/common\n",
" %shell mkdir -p /opt/conda/lib/python3.7/site-packages/alphafold/common/\n",
" %shell cp -f /content/stereo_chemical_props.txt /opt/conda/lib/python3.7/site-packages/alphafold/common/\n",
"\n",
" %shell mkdir --parents \"{PARAMS_DIR}\"\n",
" %shell wget -O \"{PARAMS_PATH}\" \"{SOURCE_URL}\"\n",
" pbar.update(27)\n",
"\n",
" %shell tar --extract --verbose --file=\"{PARAMS_PATH}\" \\\n",
" --directory=\"{PARAMS_DIR}\" --preserve-permissions\n",
" %shell rm \"{PARAMS_PATH}\"\n",
" pbar.update(55)\n",
"except subprocess.CalledProcessError:\n",
" print(captured)\n",
" raise\n",
"\n",
"import jax\n",
"if jax.local_devices()[0].platform == 'tpu':\n",
" raise RuntimeError('Colab TPU runtime not supported. Change it to GPU via Runtime -\u003e Change Runtime Type -\u003e Hardware accelerator -\u003e GPU.')\n",
"elif jax.local_devices()[0].platform == 'cpu':\n",
" raise RuntimeError('Colab CPU runtime not supported. Change it to GPU via Runtime -\u003e Change Runtime Type -\u003e Hardware accelerator -\u003e GPU.')\n",
"else:\n",
" print(f'Running with {jax.local_devices()[0].device_kind} GPU')\n",
"\n",
"# Make sure everything we need is on the path.\n",
"import sys\n",
"sys.path.append('/opt/conda/lib/python3.7/site-packages')\n",
"sys.path.append('/content/alphafold')\n",
"\n",
"# Make sure all necessary environment variables are set.\n",
"import os\n",
"os.environ['TF_FORCE_UNIFIED_MEMORY'] = '1'\n",
"os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '2.0'"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "W4JpOs6oA-QS"
},
"source": [
"## Making a prediction\n",
"\n",
"Please paste the sequence of your protein in the text box below, then run the remaining cells via _Runtime_ \u003e _Run after_. You can also run the cells individually by pressing the _Play_ button on the left.\n",
"\n",
"Note that the search against databases and the actual prediction can take some time, from minutes to hours, depending on the length of the protein and what type of GPU you are allocated by Colab (see FAQ below)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "rowN0bVYLe9n"
},
"outputs": [],
"source": [
"#@title Enter the amino acid sequence(s) to fold ⬇️\n",
"#@markdown Enter the amino acid sequence(s) to fold:\n",
"#@markdown * If you enter only a single sequence, the monomer model will be used.\n",
"#@markdown * If you enter multiple sequences, the multimer model will be used.\n",
"\n",
"from alphafold.notebooks import notebook_utils\n",
"\n",
"sequence_1 = 'MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAPKPH' #@param {type:\"string\"}\n",
"sequence_2 = '' #@param {type:\"string\"}\n",
"sequence_3 = '' #@param {type:\"string\"}\n",
"sequence_4 = '' #@param {type:\"string\"}\n",
"sequence_5 = '' #@param {type:\"string\"}\n",
"sequence_6 = '' #@param {type:\"string\"}\n",
"sequence_7 = '' #@param {type:\"string\"}\n",
"sequence_8 = '' #@param {type:\"string\"}\n",
"\n",
"input_sequences = (sequence_1, sequence_2, sequence_3, sequence_4,\n",
" sequence_5, sequence_6, sequence_7, sequence_8)\n",
"\n",
"#@markdown If folding a complex target and all the input sequences are\n",
"#@markdown prokaryotic then set `is_prokaryotic` to `True`. Set to `False`\n",
"#@markdown otherwise or if the origin is unknown.\n",
"\n",
"is_prokaryote = False #@param {type:\"boolean\"}\n",
"\n",
"MIN_SINGLE_SEQUENCE_LENGTH = 16\n",
"MAX_SINGLE_SEQUENCE_LENGTH = 2500\n",
"MAX_MULTIMER_LENGTH = 2500\n",
"\n",
"# Validate the input.\n",
"sequences, model_type_to_use = notebook_utils.validate_input(\n",
" input_sequences=input_sequences,\n",
" min_length=MIN_SINGLE_SEQUENCE_LENGTH,\n",
" max_length=MAX_SINGLE_SEQUENCE_LENGTH,\n",
" max_multimer_length=MAX_MULTIMER_LENGTH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "2tTeTTsLKPjB"
},
"outputs": [],
"source": [
"#@title Search against genetic databases\n",
"\n",
"#@markdown Once this cell has been executed, you will see\n",
"#@markdown statistics about the multiple sequence alignment \n",
"#@markdown (MSA) that will be used by AlphaFold. In particular, \n",
"#@markdown you’ll see how well each residue is covered by similar \n",
"#@markdown sequences in the MSA.\n",
"\n",
"# --- Python imports ---\n",
"import collections\n",
"import copy\n",
"from concurrent import futures\n",
"import json\n",
"import random\n",
"\n",
"from urllib import request\n",
"from google.colab import files\n",
"from matplotlib import gridspec\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import py3Dmol\n",
"\n",
"from alphafold.model import model\n",
"from alphafold.model import config\n",
"from alphafold.model import data\n",
"\n",
"from alphafold.data import feature_processing\n",
"from alphafold.data import msa_pairing\n",
"from alphafold.data import parsers\n",
"from alphafold.data import pipeline\n",
"from alphafold.data import pipeline_multimer\n",
"from alphafold.data.tools import jackhmmer\n",
"\n",
"from alphafold.common import protein\n",
"\n",
"from alphafold.relax import relax\n",
"from alphafold.relax import utils\n",
"\n",
"from IPython import display\n",
"from ipywidgets import GridspecLayout\n",
"from ipywidgets import Output\n",
"\n",
"# Color bands for visualizing plddt\n",
"PLDDT_BANDS = [(0, 50, '#FF7D45'),\n",
" (50, 70, '#FFDB13'),\n",
" (70, 90, '#65CBF3'),\n",
" (90, 100, '#0053D6')]\n",
"\n",
"# --- Find the closest source ---\n",
"test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'\n",
"ex = futures.ThreadPoolExecutor(3)\n",
"def fetch(source):\n",
" request.urlretrieve(test_url_pattern.format(source))\n",
" return source\n",
"fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]\n",
"source = None\n",
"for f in futures.as_completed(fs):\n",
" source = f.result()\n",
" ex.shutdown()\n",
" break\n",
"\n",
"JACKHMMER_BINARY_PATH = '/usr/bin/jackhmmer'\n",
"DB_ROOT_PATH = f'https://storage.googleapis.com/alphafold-colab{source}/latest/'\n",
"# The z_value is the number of sequences in a database.\n",
"MSA_DATABASES = [\n",
" {'db_name': 'uniref90',\n",
" 'db_path': f'{DB_ROOT_PATH}uniref90_2021_03.fasta',\n",
" 'num_streamed_chunks': 59,\n",
" 'z_value': 135_301_051},\n",
" {'db_name': 'smallbfd',\n",
" 'db_path': f'{DB_ROOT_PATH}bfd-first_non_consensus_sequences.fasta',\n",
" 'num_streamed_chunks': 17,\n",
" 'z_value': 65_984_053},\n",
" {'db_name': 'mgnify',\n",
" 'db_path': f'{DB_ROOT_PATH}mgy_clusters_2019_05.fasta',\n",
" 'num_streamed_chunks': 71,\n",
" 'z_value': 304_820_129},\n",
"]\n",
"\n",
"# Search UniProt and construct the all_seq features only for heteromers, not homomers.\n",
"if model_type_to_use == notebook_utils.ModelType.MULTIMER and len(set(sequences)) \u003e 1:\n",
" MSA_DATABASES.extend([\n",
" # Swiss-Prot and TrEMBL are concatenated together as UniProt.\n",
" {'db_name': 'uniprot',\n",
" 'db_path': f'{DB_ROOT_PATH}uniprot_2021_03.fasta',\n",
" 'num_streamed_chunks': 98,\n",
" 'z_value': 219_174_961 + 565_254},\n",
" ])\n",
"\n",
"TOTAL_JACKHMMER_CHUNKS = sum([cfg['num_streamed_chunks'] for cfg in MSA_DATABASES])\n",
"\n",
"MAX_HITS = {\n",
" 'uniref90': 10_000,\n",
" 'smallbfd': 5_000,\n",
" 'mgnify': 501,\n",
" 'uniprot': 50_000,\n",
"}\n",
"\n",
"\n",
"def get_msa(fasta_path):\n",
" \"\"\"Searches for MSA for the given sequence using chunked Jackhmmer search.\"\"\"\n",
"\n",
" # Run the search against chunks of genetic databases (since the genetic\n",
" # databases don't fit in Colab disk).\n",
" raw_msa_results = collections.defaultdict(list)\n",
" with tqdm.notebook.tqdm(total=TOTAL_JACKHMMER_CHUNKS, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" def jackhmmer_chunk_callback(i):\n",
" pbar.update(n=1)\n",
"\n",
" for db_config in MSA_DATABASES:\n",
" db_name = db_config['db_name']\n",
" pbar.set_description(f'Searching {db_name}')\n",
" jackhmmer_runner = jackhmmer.Jackhmmer(\n",
" binary_path=JACKHMMER_BINARY_PATH,\n",
" database_path=db_config['db_path'],\n",
" get_tblout=True,\n",
" num_streamed_chunks=db_config['num_streamed_chunks'],\n",
" streaming_callback=jackhmmer_chunk_callback,\n",
" z_value=db_config['z_value'])\n",
" # Group the results by database name.\n",
" raw_msa_results[db_name].extend(jackhmmer_runner.query(fasta_path))\n",
"\n",
" return raw_msa_results\n",
"\n",
"\n",
"features_for_chain = {}\n",
"raw_msa_results_for_sequence = {}\n",
"for sequence_index, sequence in enumerate(sequences, start=1):\n",
" print(f'\\nGetting MSA for sequence {sequence_index}')\n",
"\n",
" fasta_path = f'target_{sequence_index}.fasta'\n",
" with open(fasta_path, 'wt') as f:\n",
" f.write(f'\u003equery\\n{sequence}')\n",
"\n",
" # Don't do redundant work for multiple copies of the same chain in the multimer.\n",
" if sequence not in raw_msa_results_for_sequence:\n",
" raw_msa_results = get_msa(fasta_path=fasta_path)\n",
" raw_msa_results_for_sequence[sequence] = raw_msa_results\n",
" else:\n",
" raw_msa_results = copy.deepcopy(raw_msa_results_for_sequence[sequence])\n",
"\n",
" # Extract the MSAs from the Stockholm files.\n",
" # NB: deduplication happens later in pipeline.make_msa_features.\n",
" single_chain_msas = []\n",
" uniprot_msa = None\n",
" for db_name, db_results in raw_msa_results.items():\n",
" merged_msa = notebook_utils.merge_chunked_msa(\n",
" results=db_results, max_hits=MAX_HITS.get(db_name))\n",
" if merged_msa.sequences and db_name != 'uniprot':\n",
" single_chain_msas.append(merged_msa)\n",
" msa_size = len(set(merged_msa.sequences))\n",
" print(f'{msa_size} unique sequences found in {db_name} for sequence {sequence_index}')\n",
" elif merged_msa.sequences and db_name == 'uniprot':\n",
" uniprot_msa = merged_msa\n",
"\n",
" notebook_utils.show_msa_info(single_chain_msas=single_chain_msas, sequence_index=sequence_index)\n",
"\n",
" # Turn the raw data into model features.\n",
" feature_dict = {}\n",
" feature_dict.update(pipeline.make_sequence_features(\n",
" sequence=sequence, description='query', num_res=len(sequence)))\n",
" feature_dict.update(pipeline.make_msa_features(msas=single_chain_msas))\n",
" # We don't use templates in AlphaFold Colab notebook, add only empty placeholder features.\n",
" feature_dict.update(notebook_utils.empty_placeholder_template_features(\n",
" num_templates=0, num_res=len(sequence)))\n",
"\n",
" # Construct the all_seq features only for heteromers, not homomers.\n",
" if model_type_to_use == notebook_utils.ModelType.MULTIMER and len(set(sequences)) \u003e 1:\n",
" valid_feats = msa_pairing.MSA_FEATURES + (\n",
" 'msa_uniprot_accession_identifiers',\n",
" 'msa_species_identifiers',\n",
" )\n",
" all_seq_features = {\n",
" f'{k}_all_seq': v for k, v in pipeline.make_msa_features([uniprot_msa]).items()\n",
" if k in valid_feats}\n",
" feature_dict.update(all_seq_features)\n",
"\n",
" features_for_chain[protein.PDB_CHAIN_IDS[sequence_index - 1]] = feature_dict\n",
"\n",
"\n",
"# Do further feature post-processing depending on the model type.\n",
"if model_type_to_use == notebook_utils.ModelType.MONOMER:\n",
" np_example = features_for_chain[protein.PDB_CHAIN_IDS[0]]\n",
"\n",
"elif model_type_to_use == notebook_utils.ModelType.MULTIMER:\n",
" all_chain_features = {}\n",
" for chain_id, chain_features in features_for_chain.items():\n",
" all_chain_features[chain_id] = pipeline_multimer.convert_monomer_features(\n",
" chain_features, chain_id)\n",
"\n",
" all_chain_features = pipeline_multimer.add_assembly_features(all_chain_features)\n",
"\n",
" np_example = feature_processing.pair_and_merge(\n",
" all_chain_features=all_chain_features, is_prokaryote=is_prokaryote)\n",
"\n",
" # Pad MSA to avoid zero-sized extra_msa.\n",
" np_example = pipeline_multimer.pad_msa(np_example, min_num_seq=512)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "XUo6foMQxwS2"
},
"outputs": [],
"source": [
"#@title Run AlphaFold and download prediction\n",
"\n",
"#@markdown Once this cell has been executed, a zip-archive with\n",
"#@markdown the obtained prediction will be automatically downloaded\n",
"#@markdown to your computer.\n",
"\n",
"#@markdown In case you are having issues with the relaxation stage, you can disable it below.\n",
"#@markdown Warning: This means that the prediction might have distracting\n",
"#@markdown small stereochemical violations.\n",
"\n",
"run_relax = True #@param {type:\"boolean\"}\n",
"\n",
"# --- Run the model ---\n",
"if model_type_to_use == notebook_utils.ModelType.MONOMER:\n",
" model_names = config.MODEL_PRESETS['monomer'] + ('model_2_ptm',)\n",
"elif model_type_to_use == notebook_utils.ModelType.MULTIMER:\n",
" model_names = config.MODEL_PRESETS['multimer']\n",
"\n",
"output_dir = 'prediction'\n",
"os.makedirs(output_dir, exist_ok=True)\n",
"\n",
"plddts = {}\n",
"ranking_confidences = {}\n",
"pae_outputs = {}\n",
"unrelaxed_proteins = {}\n",
"\n",
"with tqdm.notebook.tqdm(total=len(model_names) + 1, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" for model_name in model_names:\n",
" pbar.set_description(f'Running {model_name}')\n",
"\n",
" cfg = config.model_config(model_name)\n",
" if model_type_to_use == notebook_utils.ModelType.MONOMER:\n",
" cfg.data.eval.num_ensemble = 1\n",
" elif model_type_to_use == notebook_utils.ModelType.MULTIMER:\n",
" cfg.model.num_ensemble_eval = 1\n",
" params = data.get_model_haiku_params(model_name, './alphafold/data')\n",
" model_runner = model.RunModel(cfg, params)\n",
" processed_feature_dict = model_runner.process_features(np_example, random_seed=0)\n",
" prediction = model_runner.predict(processed_feature_dict, random_seed=random.randrange(sys.maxsize))\n",
"\n",
" mean_plddt = prediction['plddt'].mean()\n",
"\n",
" if model_type_to_use == notebook_utils.ModelType.MONOMER:\n",
" if 'predicted_aligned_error' in prediction:\n",
" pae_outputs[model_name] = (prediction['predicted_aligned_error'],\n",
" prediction['max_predicted_aligned_error'])\n",
" else:\n",
" # Monomer models are sorted by mean pLDDT. Do not put monomer pTM models here as they\n",
" # should never get selected.\n",
" ranking_confidences[model_name] = prediction['ranking_confidence']\n",
" plddts[model_name] = prediction['plddt']\n",
" elif model_type_to_use == notebook_utils.ModelType.MULTIMER:\n",
" # Multimer models are sorted by pTM+ipTM.\n",
" ranking_confidences[model_name] = prediction['ranking_confidence']\n",
" plddts[model_name] = prediction['plddt']\n",
" pae_outputs[model_name] = (prediction['predicted_aligned_error'],\n",
" prediction['max_predicted_aligned_error'])\n",
"\n",
" # Set the b-factors to the per-residue plddt.\n",
" final_atom_mask = prediction['structure_module']['final_atom_mask']\n",
" b_factors = prediction['plddt'][:, None] * final_atom_mask\n",
" unrelaxed_protein = protein.from_prediction(\n",
" processed_feature_dict,\n",
" prediction,\n",
" b_factors=b_factors,\n",
" remove_leading_feature_dimension=(\n",
" model_type_to_use == notebook_utils.ModelType.MONOMER))\n",
" unrelaxed_proteins[model_name] = unrelaxed_protein\n",
"\n",
" # Delete unused outputs to save memory.\n",
" del model_runner\n",
" del params\n",
" del prediction\n",
" pbar.update(n=1)\n",
"\n",
" # --- AMBER relax the best model ---\n",
"\n",
" # Find the best model according to the mean pLDDT.\n",
" best_model_name = max(ranking_confidences.keys(), key=lambda x: ranking_confidences[x])\n",
"\n",
" if run_relax:\n",
" pbar.set_description(f'AMBER relaxation')\n",
" amber_relaxer = relax.AmberRelaxation(\n",
" max_iterations=0,\n",
" tolerance=2.39,\n",
" stiffness=10.0,\n",
" exclude_residues=[],\n",
" max_outer_iterations=3)\n",
" relaxed_pdb, _, _ = amber_relaxer.process(prot=unrelaxed_proteins[best_model_name])\n",
" else:\n",
" print('Warning: Running without the relaxation stage.')\n",
" relaxed_pdb = protein.to_pdb(unrelaxed_proteins[best_model_name])\n",
" pbar.update(n=1) # Finished AMBER relax.\n",
"\n",
"# Construct multiclass b-factors to indicate confidence bands\n",
"# 0=very low, 1=low, 2=confident, 3=very high\n",
"banded_b_factors = []\n",
"for plddt in plddts[best_model_name]:\n",
" for idx, (min_val, max_val, _) in enumerate(PLDDT_BANDS):\n",
" if plddt \u003e= min_val and plddt \u003c= max_val:\n",
" banded_b_factors.append(idx)\n",
" break\n",
"banded_b_factors = np.array(banded_b_factors)[:, None] * final_atom_mask\n",
"to_visualize_pdb = utils.overwrite_b_factors(relaxed_pdb, banded_b_factors)\n",
"\n",
"\n",
"# Write out the prediction\n",
"pred_output_path = os.path.join(output_dir, 'selected_prediction.pdb')\n",
"with open(pred_output_path, 'w') as f:\n",
" f.write(relaxed_pdb)\n",
"\n",
"\n",
"# --- Visualise the prediction \u0026 confidence ---\n",
"show_sidechains = True\n",
"def plot_plddt_legend():\n",
" \"\"\"Plots the legend for pLDDT.\"\"\"\n",
" thresh = ['Very low (pLDDT \u003c 50)',\n",
" 'Low (70 \u003e pLDDT \u003e 50)',\n",
" 'Confident (90 \u003e pLDDT \u003e 70)',\n",
" 'Very high (pLDDT \u003e 90)']\n",
"\n",
" colors = [x[2] for x in PLDDT_BANDS]\n",
"\n",
" plt.figure(figsize=(2, 2))\n",
" for c in colors:\n",
" plt.bar(0, 0, color=c)\n",
" plt.legend(thresh, frameon=False, loc='center', fontsize=20)\n",
" plt.xticks([])\n",
" plt.yticks([])\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_visible(False)\n",
" ax.spines['top'].set_visible(False)\n",
" ax.spines['left'].set_visible(False)\n",
" ax.spines['bottom'].set_visible(False)\n",
" plt.title('Model Confidence', fontsize=20, pad=20)\n",
" return plt\n",
"\n",
"# Show the structure coloured by chain if the multimer model has been used.\n",
"if model_type_to_use == notebook_utils.ModelType.MULTIMER:\n",
" multichain_view = py3Dmol.view(width=800, height=600)\n",
" multichain_view.addModelsAsFrames(to_visualize_pdb)\n",
" multichain_style = {'cartoon': {'colorscheme': 'chain'}}\n",
" multichain_view.setStyle({'model': -1}, multichain_style)\n",
" multichain_view.zoomTo()\n",
" multichain_view.show()\n",
"\n",
"# Color the structure by per-residue pLDDT\n",
"color_map = {i: bands[2] for i, bands in enumerate(PLDDT_BANDS)}\n",
"view = py3Dmol.view(width=800, height=600)\n",
"view.addModelsAsFrames(to_visualize_pdb)\n",
"style = {'cartoon': {'colorscheme': {'prop': 'b', 'map': color_map}}}\n",
"if show_sidechains:\n",
" style['stick'] = {}\n",
"view.setStyle({'model': -1}, style)\n",
"view.zoomTo()\n",
"\n",
"grid = GridspecLayout(1, 2)\n",
"out = Output()\n",
"with out:\n",
" view.show()\n",
"grid[0, 0] = out\n",
"\n",
"out = Output()\n",
"with out:\n",
" plot_plddt_legend().show()\n",
"grid[0, 1] = out\n",
"\n",
"display.display(grid)\n",
"\n",
"# Display pLDDT and predicted aligned error (if output by the model).\n",
"if pae_outputs:\n",
" num_plots = 2\n",
"else:\n",
" num_plots = 1\n",
"\n",
"plt.figure(figsize=[8 * num_plots, 6])\n",
"plt.subplot(1, num_plots, 1)\n",
"plt.plot(plddts[best_model_name])\n",
"plt.title('Predicted LDDT')\n",
"plt.xlabel('Residue')\n",
"plt.ylabel('pLDDT')\n",
"\n",
"if num_plots == 2:\n",
" plt.subplot(1, 2, 2)\n",
" pae, max_pae = list(pae_outputs.values())[0]\n",
" plt.imshow(pae, vmin=0., vmax=max_pae, cmap='Greens_r')\n",
" plt.colorbar(fraction=0.046, pad=0.04)\n",
"\n",
" # Display lines at chain boundaries.\n",
" best_unrelaxed_prot = unrelaxed_proteins[best_model_name]\n",
" total_num_res = best_unrelaxed_prot.residue_index.shape[-1]\n",
" chain_ids = best_unrelaxed_prot.chain_index\n",
" for chain_boundary in np.nonzero(chain_ids[:-1] - chain_ids[1:]):\n",
" plt.plot([0, total_num_res], [chain_boundary, chain_boundary], color='red')\n",
" plt.plot([chain_boundary, chain_boundary], [0, total_num_res], color='red')\n",
"\n",
" plt.title('Predicted Aligned Error')\n",
" plt.xlabel('Scored residue')\n",
" plt.ylabel('Aligned residue')\n",
"\n",
"# Save the predicted aligned error (if it exists).\n",
"pae_output_path = os.path.join(output_dir, 'predicted_aligned_error.json')\n",
"if pae_outputs:\n",
" # Save predicted aligned error in the same format as the AF EMBL DB.\n",
" pae_data = notebook_utils.get_pae_json(pae=pae, max_pae=max_pae.item())\n",
" with open(pae_output_path, 'w') as f:\n",
" f.write(pae_data)\n",
"\n",
"# --- Download the predictions ---\n",
"!zip -q -r {output_dir}.zip {output_dir}\n",
"files.download(f'{output_dir}.zip')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "lUQAn5LYC5n4"
},
"source": [
"### Interpreting the prediction\n",
"\n",
"In general predicted LDDT (pLDDT) is best used for intra-domain confidence, whereas Predicted Aligned Error (PAE) is best used for determining between domain or between chain confidence.\n",
"\n",
"Please see the [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2), the [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1), and the [AlphaFold-Multimer paper](https://www.biorxiv.org/content/10.1101/2021.10.04.463034v1) as well as [our FAQ](https://alphafold.ebi.ac.uk/faq) on how to interpret AlphaFold predictions."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "jeb2z8DIA4om"
},
"source": [
"## FAQ \u0026 Troubleshooting\n",
"\n",
"\n",
"* How do I get a predicted protein structure for my protein?\n",
" * Click on the _Connect_ button on the top right to get started.\n",
" * Paste the amino acid sequence of your protein (without any headers) into the “Enter the amino acid sequence to fold”.\n",
" * Run all cells in the Colab, either by running them individually (with the play button on the left side) or via _Runtime_ \u003e _Run all._\n",
" * The predicted protein structure will be downloaded once all cells have been executed. Note: This can take minutes to hours - see below.\n",
"* How long will this take?\n",
" * Downloading the AlphaFold source code can take up to a few minutes.\n",
" * Downloading and installing the third-party software can take up to a few minutes.\n",
" * The search against genetic databases can take minutes to hours.\n",
" * Running AlphaFold and generating the prediction can take minutes to hours, depending on the length of your protein and on which GPU-type Colab has assigned you.\n",
"* My Colab no longer seems to be doing anything, what should I do?\n",
" * Some steps may take minutes to hours to complete.\n",
" * If nothing happens or if you receive an error message, try restarting your Colab runtime via _Runtime_ \u003e _Restart runtime_.\n",
" * If this doesn’t help, try resetting your Colab runtime via _Runtime_ \u003e _Factory reset runtime_.\n",
"* How does this compare to the open-source version of AlphaFold?\n",
" * This Colab version of AlphaFold searches a selected portion of the BFD dataset and currently doesn’t use templates, so its accuracy is reduced in comparison to the full version of AlphaFold that is described in the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2) and [Github repo](https://github.com/deepmind/alphafold/) (the full version is available via the inference script).\n",
"* What is a Colab?\n",
" * See the [Colab FAQ](https://research.google.com/colaboratory/faq.html).\n",
"* I received a warning “Notebook requires high RAM”, what do I do?\n",
" * The resources allocated to your Colab vary. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n",
" * You can execute the Colab nonetheless.\n",
"* I received an error “Colab CPU runtime not supported” or “No GPU/TPU found”, what do I do?\n",
" * Colab CPU runtime is not supported. Try changing your runtime via _Runtime_ \u003e _Change runtime type_ \u003e _Hardware accelerator_ \u003e _GPU_.\n",
" * The type of GPU allocated to your Colab varies. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n",
" * If you receive “Cannot connect to GPU backend”, you can try again later to see if Colab allocates you a GPU.\n",
" * [Colab Pro](https://colab.research.google.com/signup) offers priority access to GPUs.\n",
"* I received an error “ModuleNotFoundError: No module named ...”, even though I ran the cell that imports it, what do I do?\n",
" * Colab notebooks on the free tier time out after a certain amount of time. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html#idle-timeouts). Try rerunning the whole notebook from the beginning.\n",
"* Does this tool install anything on my computer?\n",
" * No, everything happens in the cloud on Google Colab.\n",
" * At the end of the Colab execution a zip-archive with the obtained prediction will be automatically downloaded to your computer.\n",
"* How should I share feedback and bug reports?\n",
" * Please share any feedback and bug reports as an [issue](https://github.com/deepmind/alphafold/issues) on Github.\n",
"\n",
"\n",
"## Related work\n",
"\n",
"Take a look at these Colab notebooks provided by the community (please note that these notebooks may vary from our validated AlphaFold system and we cannot guarantee their accuracy):\n",
"\n",
"* The [ColabFold AlphaFold2 notebook](https://colab.research.google.com/github/sokrypton/ColabFold/blob/main/AlphaFold2.ipynb) by Sergey Ovchinnikov, Milot Mirdita and Martin Steinegger, which uses an API hosted at the Södinglab based on the MMseqs2 server ([Mirdita et al. 2019, Bioinformatics](https://academic.oup.com/bioinformatics/article/35/16/2856/5280135)) for the multiple sequence alignment creation.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "YfPhvYgKC81B"
},
"source": [
"# License and Disclaimer\n",
"\n",
"This is not an officially-supported Google product.\n",
"\n",
"This Colab notebook and other information provided is for theoretical modelling only, caution should be exercised in its use. It is provided ‘as-is’ without any warranty of any kind, whether expressed or implied. Information is not intended to be a substitute for professional medical advice, diagnosis, or treatment, and does not constitute medical or other professional advice.\n",
"\n",
"Copyright 2021 DeepMind Technologies Limited.\n",
"\n",
"\n",
"## AlphaFold Code License\n",
"\n",
"Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.\n",
"\n",
"Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n",
"\n",
"## Model Parameters License\n",
"\n",
"The AlphaFold parameters are made available for non-commercial use only, under the terms of the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) license. You can find details at: https://creativecommons.org/licenses/by-nc/4.0/legalcode\n",
"\n",
"\n",
"## Third-party software\n",
"\n",
"Use of the third-party software, libraries or code referred to in the [Acknowledgements section](https://github.com/deepmind/alphafold/#acknowledgements) in the AlphaFold README may be governed by separate terms and conditions or license provisions. Your use of the third-party software, libraries or code is subject to any such terms and you should check that you can comply with any applicable restrictions or terms and conditions before use.\n",
"\n",
"\n",
"## Mirrored Databases\n",
"\n",
"The following databases have been mirrored by DeepMind, and are available with reference to the following:\n",
"* UniProt: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n",
"* UniRef90: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n",
"* MGnify: v2019\\_05 (unmodified), by Mitchell AL et al., available free of all copyright restrictions and made fully and freely available for both non-commercial and commercial use under [CC0 1.0 Universal (CC0 1.0) Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/).\n",
"* BFD: (modified), by Steinegger M. and Söding J., modified by DeepMind, available under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by/4.0/). See the Methods section of the [AlphaFold proteome paper](https://www.nature.com/articles/s41586-021-03828-1) for details."
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "AlphaFold.ipynb",
"private_outputs": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
{
 "cell_type": "code",
"execution_count": 0,
"metadata": {
"cellView": "form",
"id": "woIxeCPygt7K"
},
"outputs": [],
"source": [
"#@title Install third-party software\n",
"\n",
"#@markdown Please execute this cell by pressing the _Play_ button \n",
"#@markdown on the left to download and import third-party software \n",
"#@markdown in this Colab notebook. (See the [acknowledgements](https://github.com/deepmind/alphafold/#acknowledgements) in our readme.)\n",
"\n",
"#@markdown **Note**: This installs the software on the Colab \n",
"#@markdown notebook in the cloud and not on your computer.\n",
"\n",
"from IPython.utils import io\n",
"import os\n",
"import subprocess\n",
"import tqdm.notebook\n",
"\n",
"TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'\n",
"\n",
"try:\n",
" with tqdm.notebook.tqdm(total=100, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" with io.capture_output() as captured:\n",
" # Uninstall default Colab version of TF.\n",
" %shell pip uninstall -y tensorflow\n",
"\n",
" %shell sudo apt install --quiet --yes hmmer\n",
" pbar.update(6)\n",
"\n",
" # Install py3dmol.\n",
" %shell pip install py3dmol\n",
" pbar.update(2)\n",
"\n",
" # Install OpenMM and pdbfixer.\n",
" %shell rm -rf /opt/conda\n",
" %shell wget -q -P /tmp \\\n",
" https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \\\n",
" && bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \\\n",
" && rm /tmp/Miniconda3-latest-Linux-x86_64.sh\n",
" pbar.update(9)\n",
"\n",
" PATH=%env PATH\n",
" %env PATH=/opt/conda/bin:{PATH}\n",
" %shell conda update -qy conda \\\n",
" && conda install -qy -c conda-forge \\\n",
" python=3.7 \\\n",
" openmm=7.5.1 \\\n",
" pdbfixer\n",
" pbar.update(80)\n",
"\n",
" # Create a ramdisk to store a database chunk to make Jackhmmer run fast.\n",
" %shell sudo mkdir -m 777 --parents /tmp/ramdisk\n",
" %shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk\n",
" pbar.update(2)\n",
"\n",
" %shell wget -q -P /content \\\n",
" https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt\n",
" pbar.update(1)\n",
"except subprocess.CalledProcessError:\n",
" print(captured)\n",
" raise"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"cellView": "form",
"id": "VzJ5iMjTtoZw"
},
"outputs": [],
"source": [
"#@title Download AlphaFold\n",
"\n",
"#@markdown Please execute this cell by pressing the *Play* button on \n",
"#@markdown the left.\n",
"\n",
"GIT_REPO = 'https://github.com/deepmind/alphafold'\n",
"\n",
"SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_2021-07-14.tar'\n",
"PARAMS_DIR = './alphafold/data/params'\n",
"PARAMS_PATH = os.path.join(PARAMS_DIR, os.path.basename(SOURCE_URL))\n",
"\n",
"try:\n",
" with tqdm.notebook.tqdm(total=100, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" with io.capture_output() as captured:\n",
" %shell rm -rf alphafold\n",
" %shell git clone {GIT_REPO} alphafold\n",
" pbar.update(8)\n",
" # Install the required versions of all dependencies.\n",
" %shell pip3 install -r ./alphafold/requirements.txt\n",
" # Run setup.py to install only AlphaFold.\n",
" %shell pip3 install --no-dependencies ./alphafold\n",
" pbar.update(10)\n",
"\n",
" # Apply OpenMM patch.\n",
" %shell pushd /opt/conda/lib/python3.7/site-packages/ && \\\n",
" patch -p0 < /content/alphafold/docker/openmm.patch && \\\n",
" popd\n",
" \n",
" %shell mkdir -p /content/alphafold/common\n",
" %shell cp -f /content/stereo_chemical_props.txt /content/alphafold/common\n",
"\n",
" %shell mkdir --parents \"{PARAMS_DIR}\"\n",
" %shell wget -O \"{PARAMS_PATH}\" \"{SOURCE_URL}\"\n",
" pbar.update(27)\n",
"\n",
" %shell tar --extract --verbose --file=\"{PARAMS_PATH}\" \\\n",
" --directory=\"{PARAMS_DIR}\" --preserve-permissions\n",
" %shell rm \"{PARAMS_PATH}\"\n",
" pbar.update(55)\n",
"except subprocess.CalledProcessError:\n",
" print(captured)\n",
" raise\n",
"\n",
"import jax\n",
"if jax.local_devices()[0].platform == 'tpu':\n",
" raise RuntimeError('Colab TPU runtime not supported. Change it to GPU via Runtime -> Change Runtime Type -> Hardware accelerator -> GPU.')\n",
"elif jax.local_devices()[0].platform == 'cpu':\n",
" raise RuntimeError('Colab CPU runtime not supported. Change it to GPU via Runtime -> Change Runtime Type -> Hardware accelerator -> GPU.')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "W4JpOs6oA-QS"
},
"source": [
"## Making a prediction\n",
"\n",
"Please paste the sequence of your protein in the text box below, then run the remaining cells via _Runtime_ > _Run after_. You can also run the cells individually by pressing the _Play_ button on the left.\n",
"\n",
"Note that the search against databases and the actual prediction can take some time, from minutes to hours, depending on the length of the protein and what type of GPU you are allocated by Colab (see FAQ below)."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"cellView": "form",
"id": "rowN0bVYLe9n"
},
"outputs": [],
"source": [
"#@title Enter the amino acid sequence to fold ⬇️\n",
"sequence = 'MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAPKPH' #@param {type:\"string\"}\n",
"\n",
"MIN_SEQUENCE_LENGTH = 16\n",
"MAX_SEQUENCE_LENGTH = 2500\n",
"\n",
"# Remove all whitespaces, tabs and end lines; upper-case\n",
"sequence = sequence.translate(str.maketrans('', '', ' \\n\\t')).upper()\n",
"aatypes = set('ACDEFGHIKLMNPQRSTVWY') # 20 standard aatypes\n",
"if not set(sequence).issubset(aatypes):\n",
" raise Exception(f'Input sequence contains non-amino acid letters: {set(sequence) - aatypes}. AlphaFold only supports 20 standard amino acids as inputs.')\n",
"if len(sequence) < MIN_SEQUENCE_LENGTH:\n",
" raise Exception(f'Input sequence is too short: {len(sequence)} amino acids, while the minimum is {MIN_SEQUENCE_LENGTH}')\n",
"if len(sequence) > MAX_SEQUENCE_LENGTH:\n",
" raise Exception(f'Input sequence is too long: {len(sequence)} amino acids, while the maximum is {MAX_SEQUENCE_LENGTH}. Please use the full AlphaFold system for long sequences.')"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"cellView": "form",
"id": "2tTeTTsLKPjB"
},
"outputs": [],
"source": [
"#@title Search against genetic databases\n",
"\n",
"#@markdown Once this cell has been executed, you will see\n",
"#@markdown statistics about the multiple sequence alignment \n",
"#@markdown (MSA) that will be used by AlphaFold. In particular, \n",
"#@markdown you’ll see how well each residue is covered by similar \n",
"#@markdown sequences in the MSA.\n",
"\n",
"# --- Python imports ---\n",
"import sys\n",
"sys.path.append('/opt/conda/lib/python3.7/site-packages')\n",
"\n",
"import os\n",
"os.environ['TF_FORCE_UNIFIED_MEMORY'] = '1'\n",
"os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '2.0'\n",
"\n",
"from urllib import request\n",
"from concurrent import futures\n",
"from google.colab import files\n",
"import json\n",
"from matplotlib import gridspec\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import py3Dmol\n",
"\n",
"from alphafold.model import model\n",
"from alphafold.model import config\n",
"from alphafold.model import data\n",
"\n",
"from alphafold.data import parsers\n",
"from alphafold.data import pipeline\n",
"from alphafold.data.tools import jackhmmer\n",
"\n",
"from alphafold.common import protein\n",
"\n",
"from alphafold.relax import relax\n",
"from alphafold.relax import utils\n",
"\n",
"from IPython import display\n",
"from ipywidgets import GridspecLayout\n",
"from ipywidgets import Output\n",
"\n",
"# Color bands for visualizing plddt\n",
"PLDDT_BANDS = [(0, 50, '#FF7D45'),\n",
" (50, 70, '#FFDB13'),\n",
" (70, 90, '#65CBF3'),\n",
" (90, 100, '#0053D6')]\n",
"\n",
"# --- Find the closest source ---\n",
"test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'\n",
"ex = futures.ThreadPoolExecutor(3)\n",
"def fetch(source):\n",
" request.urlretrieve(test_url_pattern.format(source))\n",
" return source\n",
"fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]\n",
"source = None\n",
"for f in futures.as_completed(fs):\n",
" source = f.result()\n",
" ex.shutdown()\n",
" break\n",
"\n",
"# --- Search against genetic databases ---\n",
"with open('target.fasta', 'wt') as f:\n",
" f.write(f'>query\\n{sequence}')\n",
"\n",
"# Run the search against chunks of genetic databases (since the genetic\n",
"# databases don't fit in Colab ramdisk).\n",
"\n",
"jackhmmer_binary_path = '/usr/bin/jackhmmer'\n",
"dbs = []\n",
"\n",
"num_jackhmmer_chunks = {'uniref90': 59, 'smallbfd': 17, 'mgnify': 71}\n",
"total_jackhmmer_chunks = sum(num_jackhmmer_chunks.values())\n",
"with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" def jackhmmer_chunk_callback(i):\n",
" pbar.update(n=1)\n",
"\n",
" pbar.set_description('Searching uniref90')\n",
" jackhmmer_uniref90_runner = jackhmmer.Jackhmmer(\n",
" binary_path=jackhmmer_binary_path,\n",
" database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/uniref90_2021_03.fasta',\n",
" get_tblout=True,\n",
" num_streamed_chunks=num_jackhmmer_chunks['uniref90'],\n",
" streaming_callback=jackhmmer_chunk_callback,\n",
" z_value=135301051)\n",
" dbs.append(('uniref90', jackhmmer_uniref90_runner.query('target.fasta')))\n",
"\n",
" pbar.set_description('Searching smallbfd')\n",
" jackhmmer_smallbfd_runner = jackhmmer.Jackhmmer(\n",
" binary_path=jackhmmer_binary_path,\n",
" database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/bfd-first_non_consensus_sequences.fasta',\n",
" get_tblout=True,\n",
" num_streamed_chunks=num_jackhmmer_chunks['smallbfd'],\n",
" streaming_callback=jackhmmer_chunk_callback,\n",
" z_value=65984053)\n",
" dbs.append(('smallbfd', jackhmmer_smallbfd_runner.query('target.fasta')))\n",
"\n",
" pbar.set_description('Searching mgnify')\n",
" jackhmmer_mgnify_runner = jackhmmer.Jackhmmer(\n",
" binary_path=jackhmmer_binary_path,\n",
" database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/mgy_clusters_2019_05.fasta',\n",
" get_tblout=True,\n",
" num_streamed_chunks=num_jackhmmer_chunks['mgnify'],\n",
" streaming_callback=jackhmmer_chunk_callback,\n",
" z_value=304820129)\n",
" dbs.append(('mgnify', jackhmmer_mgnify_runner.query('target.fasta')))\n",
"\n",
"\n",
"# --- Extract the MSAs and visualize ---\n",
"# Extract the MSAs from the Stockholm files.\n",
"# NB: deduplication happens later in pipeline.make_msa_features.\n",
"\n",
"mgnify_max_hits = 501\n",
"\n",
"msas = []\n",
"deletion_matrices = []\n",
"full_msa = []\n",
"for db_name, db_results in dbs:\n",
" unsorted_results = []\n",
" for i, result in enumerate(db_results):\n",
" msa, deletion_matrix, target_names = parsers.parse_stockholm(result['sto'])\n",
" e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])\n",
" e_values = [e_values_dict[t.split('/')[0]] for t in target_names]\n",
" zipped_results = zip(msa, deletion_matrix, target_names, e_values)\n",
" if i != 0:\n",
" # Only take query from the first chunk\n",
" zipped_results = [x for x in zipped_results if x[2] != 'query']\n",
" unsorted_results.extend(zipped_results)\n",
" sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])\n",
" db_msas, db_deletion_matrices, _, _ = zip(*sorted_by_evalue)\n",
" if db_msas:\n",
" if db_name == 'mgnify':\n",
" db_msas = db_msas[:mgnify_max_hits]\n",
" db_deletion_matrices = db_deletion_matrices[:mgnify_max_hits]\n",
" full_msa.extend(db_msas)\n",
" msas.append(db_msas)\n",
" deletion_matrices.append(db_deletion_matrices)\n",
" msa_size = len(set(db_msas))\n",
" print(f'{msa_size} Sequences Found in {db_name}')\n",
"\n",
"deduped_full_msa = list(dict.fromkeys(full_msa))\n",
"total_msa_size = len(deduped_full_msa)\n",
"print(f'\\n{total_msa_size} Sequences Found in Total\\n')\n",
"\n",
"aa_map = {restype: i for i, restype in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')}\n",
"msa_arr = np.array([[aa_map[aa] for aa in seq] for seq in deduped_full_msa])\n",
"num_alignments, num_res = msa_arr.shape\n",
"\n",
"fig = plt.figure(figsize=(12, 3))\n",
"plt.title('Per-Residue Count of Non-Gap Amino Acids in the MSA')\n",
"plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), color='black')\n",
"plt.ylabel('Non-Gap Count')\n",
"plt.yticks(range(0, num_alignments + 1, max(1, int(num_alignments / 3))))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"cellView": "form",
"id": "XUo6foMQxwS2"
},
"outputs": [],
"source": [
"#@title Run AlphaFold and download prediction\n",
"\n",
"#@markdown Once this cell has been executed, a zip-archive with \n",
"#@markdown the obtained prediction will be automatically downloaded \n",
"#@markdown to your computer.\n",
"\n",
"# --- Run the model ---\n",
"model_names = ['model_1', 'model_2', 'model_3', 'model_4', 'model_5', 'model_2_ptm']\n",
"\n",
"def _placeholder_template_feats(num_templates_, num_res_):\n",
" return {\n",
" 'template_aatype': np.zeros([num_templates_, num_res_, 22], np.float32),\n",
" 'template_all_atom_masks': np.zeros([num_templates_, num_res_, 37, 3], np.float32),\n",
" 'template_all_atom_positions': np.zeros([num_templates_, num_res_, 37], np.float32),\n",
" 'template_domain_names': np.zeros([num_templates_], np.float32),\n",
" 'template_sum_probs': np.zeros([num_templates_], np.float32),\n",
" }\n",
"\n",
"output_dir = 'prediction'\n",
"os.makedirs(output_dir, exist_ok=True)\n",
"\n",
"plddts = {}\n",
"pae_outputs = {}\n",
"unrelaxed_proteins = {}\n",
"\n",
"with tqdm.notebook.tqdm(total=len(model_names) + 1, bar_format=TQDM_BAR_FORMAT) as pbar:\n",
" for model_name in model_names:\n",
" pbar.set_description(f'Running {model_name}')\n",
" num_templates = 0\n",
" num_res = len(sequence)\n",
"\n",
" feature_dict = {}\n",
" feature_dict.update(pipeline.make_sequence_features(sequence, 'test', num_res))\n",
" feature_dict.update(pipeline.make_msa_features(msas, deletion_matrices=deletion_matrices))\n",
" feature_dict.update(_placeholder_template_feats(num_templates, num_res))\n",
"\n",
" cfg = config.model_config(model_name)\n",
" params = data.get_model_haiku_params(model_name, './alphafold/data')\n",
" model_runner = model.RunModel(cfg, params)\n",
" processed_feature_dict = model_runner.process_features(feature_dict,\n",
" random_seed=0)\n",
" prediction_result = model_runner.predict(processed_feature_dict)\n",
"\n",
" mean_plddt = prediction_result['plddt'].mean()\n",
"\n",
" if 'predicted_aligned_error' in prediction_result:\n",
" pae_outputs[model_name] = (\n",
" prediction_result['predicted_aligned_error'],\n",
" prediction_result['max_predicted_aligned_error']\n",
" )\n",
" else:\n",
" # Get the pLDDT confidence metrics. Do not put pTM models here as they\n",
" # should never get selected.\n",
" plddts[model_name] = prediction_result['plddt']\n",
"\n",
" # Set the b-factors to the per-residue plddt.\n",
" final_atom_mask = prediction_result['structure_module']['final_atom_mask']\n",
" b_factors = prediction_result['plddt'][:, None] * final_atom_mask\n",
" unrelaxed_protein = protein.from_prediction(processed_feature_dict,\n",
" prediction_result,\n",
" b_factors=b_factors)\n",
" unrelaxed_proteins[model_name] = unrelaxed_protein\n",
"\n",
" # Delete unused outputs to save memory.\n",
" del model_runner\n",
" del params\n",
" del prediction_result\n",
" pbar.update(n=1)\n",
"\n",
" # --- AMBER relax the best model ---\n",
" pbar.set_description(f'AMBER relaxation')\n",
" amber_relaxer = relax.AmberRelaxation(\n",
" max_iterations=0,\n",
" tolerance=2.39,\n",
" stiffness=10.0,\n",
" exclude_residues=[],\n",
" max_outer_iterations=20)\n",
" # Find the best model according to the mean pLDDT.\n",
" best_model_name = max(plddts.keys(), key=lambda x: plddts[x].mean())\n",
" relaxed_pdb, _, _ = amber_relaxer.process(\n",
" prot=unrelaxed_proteins[best_model_name])\n",
" pbar.update(n=1) # Finished AMBER relax.\n",
"\n",
"# Construct multiclass b-factors to indicate confidence bands\n",
"# 0=very low, 1=low, 2=confident, 3=very high\n",
"banded_b_factors = []\n",
"for plddt in plddts[best_model_name]:\n",
" for idx, (min_val, max_val, _) in enumerate(PLDDT_BANDS):\n",
" if plddt >= min_val and plddt <= max_val:\n",
" banded_b_factors.append(idx)\n",
" break\n",
"banded_b_factors = np.array(banded_b_factors)[:, None] * final_atom_mask\n",
"to_visualize_pdb = utils.overwrite_b_factors(relaxed_pdb, banded_b_factors)\n",
"\n",
"\n",
"# Write out the prediction\n",
"pred_output_path = os.path.join(output_dir, 'selected_prediction.pdb')\n",
"with open(pred_output_path, 'w') as f:\n",
" f.write(relaxed_pdb)\n",
"\n",
"\n",
"# --- Visualise the prediction & confidence ---\n",
"show_sidechains = True\n",
"def plot_plddt_legend():\n",
" \"\"\"Plots the legend for pLDDT.\"\"\"\n",
" thresh = [\n",
" 'Very low (pLDDT < 50)',\n",
" 'Low (70 > pLDDT > 50)',\n",
" 'Confident (90 > pLDDT > 70)',\n",
" 'Very high (pLDDT > 90)']\n",
"\n",
" colors = [x[2] for x in PLDDT_BANDS]\n",
"\n",
" plt.figure(figsize=(2, 2))\n",
" for c in colors:\n",
" plt.bar(0, 0, color=c)\n",
" plt.legend(thresh, frameon=False, loc='center', fontsize=20)\n",
" plt.xticks([])\n",
" plt.yticks([])\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_visible(False)\n",
" ax.spines['top'].set_visible(False)\n",
" ax.spines['left'].set_visible(False)\n",
" ax.spines['bottom'].set_visible(False)\n",
" plt.title('Model Confidence', fontsize=20, pad=20)\n",
" return plt\n",
"\n",
"# Color the structure by per-residue pLDDT\n",
"color_map = {i: bands[2] for i, bands in enumerate(PLDDT_BANDS)}\n",
"view = py3Dmol.view(width=800, height=600)\n",
"view.addModelsAsFrames(to_visualize_pdb)\n",
"style = {'cartoon': {\n",
" 'colorscheme': {\n",
" 'prop': 'b',\n",
" 'map': color_map}\n",
" }}\n",
"if show_sidechains:\n",
" style['stick'] = {}\n",
"view.setStyle({'model': -1}, style)\n",
"view.zoomTo()\n",
"\n",
"grid = GridspecLayout(1, 2)\n",
"out = Output()\n",
"with out:\n",
" view.show()\n",
"grid[0, 0] = out\n",
"\n",
"out = Output()\n",
"with out:\n",
" plot_plddt_legend().show()\n",
"grid[0, 1] = out\n",
"\n",
"display.display(grid)\n",
"\n",
"# Display pLDDT and predicted aligned error (if output by the model).\n",
"if pae_outputs:\n",
" num_plots = 2\n",
"else:\n",
" num_plots = 1\n",
"\n",
"plt.figure(figsize=[8 * num_plots, 6])\n",
"plt.subplot(1, num_plots, 1)\n",
"plt.plot(plddts[best_model_name])\n",
"plt.title('Predicted LDDT')\n",
"plt.xlabel('Residue')\n",
"plt.ylabel('pLDDT')\n",
"\n",
"if num_plots == 2:\n",
" plt.subplot(1, 2, 2)\n",
" pae, max_pae = list(pae_outputs.values())[0]\n",
" plt.imshow(pae, vmin=0., vmax=max_pae, cmap='Greens_r')\n",
" plt.colorbar(fraction=0.046, pad=0.04)\n",
" plt.title('Predicted Aligned Error')\n",
" plt.xlabel('Scored residue')\n",
" plt.ylabel('Aligned residue')\n",
"\n",
"# Save pLDDT and predicted aligned error (if it exists)\n",
"pae_output_path = os.path.join(output_dir, 'predicted_aligned_error.json')\n",
"if pae_outputs:\n",
" # Save predicted aligned error in the same format as the AF EMBL DB\n",
" rounded_errors = np.round(pae.astype(np.float64), decimals=1)\n",
" indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1\n",
" indices_1 = indices[0].flatten().tolist()\n",
" indices_2 = indices[1].flatten().tolist()\n",
" pae_data = json.dumps([{\n",
" 'residue1': indices_1,\n",
" 'residue2': indices_2,\n",
" 'distance': rounded_errors.flatten().tolist(),\n",
" 'max_predicted_aligned_error': max_pae.item()\n",
" }],\n",
" indent=None,\n",
" separators=(',', ':'))\n",
" with open(pae_output_path, 'w') as f:\n",
" f.write(pae_data)\n",
"\n",
"\n",
"# --- Download the predictions ---\n",
"!zip -q -r {output_dir}.zip {output_dir}\n",
"files.download(f'{output_dir}.zip')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "lUQAn5LYC5n4"
},
"source": [
"### Interpreting the prediction\n",
"\n",
"Please see the [AlphaFold methods paper](https://www.nature.com/articles/s41586-021-03819-2) and the [AlphaFold predictions of the human proteome paper](https://www.nature.com/articles/s41586-021-03828-1), as well as [our FAQ](https://alphafold.ebi.ac.uk/faq) on how to interpret AlphaFold predictions."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "jeb2z8DIA4om"
},
"source": [
"## FAQ & Troubleshooting\n",
"\n",
"\n",
"* How do I get a predicted protein structure for my protein?\n",
" * Click on the _Connect_ button on the top right to get started.\n",
" * Paste the amino acid sequence of your protein (without any headers) into the “Enter the amino acid sequence to fold”.\n",
" * Run all cells in the Colab, either by running them individually (with the play button on the left side) or via _Runtime_ > _Run all._\n",
" * The predicted protein structure will be downloaded once all cells have been executed. Note: This can take minutes to hours - see below.\n",
"* How long will this take?\n",
" * Downloading the AlphaFold source code can take up to a few minutes.\n",
" * Downloading and installing the third-party software can take up to a few minutes.\n",
" * The search against genetic databases can take minutes to hours.\n",
" * Running AlphaFold and generating the prediction can take minutes to hours, depending on the length of your protein and on which GPU-type Colab has assigned you.\n",
"* My Colab no longer seems to be doing anything, what should I do?\n",
" * Some steps may take minutes to hours to complete.\n",
" * If nothing happens or if you receive an error message, try restarting your Colab runtime via _Runtime_ > _Restart runtime_.\n",
" * If this doesn’t help, try resetting your Colab runtime via _Runtime_ > _Factory reset runtime_.\n",
"* How does this compare to the open-source version of AlphaFold?\n",
" * This Colab version of AlphaFold searches a selected portion of the BFD dataset and currently doesn’t use templates, so its accuracy is reduced in comparison to the full version of AlphaFold that is described in the [AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2) and [Github repo](https://github.com/deepmind/alphafold/) (the full version is available via the inference script).\n",
"* What is a Colab?\n",
" * See the [Colab FAQ](https://research.google.com/colaboratory/faq.html).\n",
"* I received a warning “Notebook requires high RAM”, what do I do?\n",
" * The resources allocated to your Colab vary. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n",
" * You can execute the Colab nonetheless.\n",
"* I received an error “Colab CPU runtime not supported” or “No GPU/TPU found”, what do I do?\n",
" * Colab CPU runtime is not supported. Try changing your runtime via _Runtime_ > _Change runtime type_ > _Hardware accelerator_ > _GPU_.\n",
" * The type of GPU allocated to your Colab varies. See the [Colab FAQ](https://research.google.com/colaboratory/faq.html) for more details.\n",
" * If you receive “Cannot connect to GPU backend”, you can try again later to see if Colab allocates you a GPU.\n",
" * [Colab Pro](https://colab.research.google.com/signup) offers priority access to GPUs. \n",
"* Does this tool install anything on my computer?\n",
" * No, everything happens in the cloud on Google Colab.\n",
" * At the end of the Colab execution a zip-archive with the obtained prediction will be automatically downloaded to your computer.\n",
"* How should I share feedback and bug reports?\n",
" * Please share any feedback and bug reports as an [issue](https://github.com/deepmind/alphafold/issues) on Github.\n",
"\n",
"\n",
"## Related work\n",
"\n",
"Take a look at these Colab notebooks provided by the community (please note that these notebooks may vary from our validated AlphaFold system and we cannot guarantee their accuracy):\n",
"\n",
"* The [ColabFold AlphaFold2 notebook](https://colab.research.google.com/github/sokrypton/ColabFold/blob/main/AlphaFold2.ipynb) by Sergey Ovchinnikov, Milot Mirdita and Martin Steinegger, which uses an API hosted at the Södinglab based on the MMseqs2 server ([Mirdita et al. 2019, Bioinformatics](https://academic.oup.com/bioinformatics/article/35/16/2856/5280135)) for the multiple sequence alignment creation.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "YfPhvYgKC81B"
},
"source": [
"# License and Disclaimer\n",
"\n",
"This is not an officially-supported Google product.\n",
"\n",
"This Colab notebook and other information provided is for theoretical modelling only, caution should be exercised in its use. It is provided ‘as-is’ without any warranty of any kind, whether expressed or implied. Information is not intended to be a substitute for professional medical advice, diagnosis, or treatment, and does not constitute medical or other professional advice.\n",
"\n",
"Copyright 2021 DeepMind Technologies Limited.\n",
"\n",
"\n",
"## AlphaFold Code License\n",
"\n",
"Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0.\n",
"\n",
"Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n",
"\n",
"## Model Parameters License\n",
"\n",
"The AlphaFold parameters are made available for non-commercial use only, under the terms of the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) license. You can find details at: https://creativecommons.org/licenses/by-nc/4.0/legalcode\n",
"\n",
"\n",
"## Third-party software\n",
"\n",
"Use of the third-party software, libraries or code referred to in the [Acknowledgements section](https://github.com/deepmind/alphafold/#acknowledgements) in the AlphaFold README may be governed by separate terms and conditions or license provisions. Your use of the third-party software, libraries or code is subject to any such terms and you should check that you can comply with any applicable restrictions or terms and conditions before use.\n",
"\n",
"\n",
"## Mirrored Databases\n",
"\n",
"The following databases have been mirrored by DeepMind, and are available with reference to the following:\n",
"* UniRef90: v2021\\_03 (unmodified), by The UniProt Consortium, available under a [Creative Commons Attribution-NoDerivatives 4.0 International License](http://creativecommons.org/licenses/by-nd/4.0/).\n",
"* MGnify: v2019\\_05 (unmodified), by Mitchell AL et al., available free of all copyright restrictions and made fully and freely available for both non-commercial and commercial use under [CC0 1.0 Universal (CC0 1.0) Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/).\n",
"* BFD: (modified), by Steinegger M. and Söding J., modified by DeepMind, available under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by/4.0/). See the Methods section of the [AlphaFold proteome paper](https://www.nature.com/articles/s41586-021-03828-1) for details."
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "AlphaFold.ipynb"
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}