Commit a1c29028 authored by zhangqha

update uni-fold
# Copyright (c) DP Technology.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import importlib
import logging
import os
import sys
import warnings
from functools import partial
from typing import List, Callable, Any, Dict
import numpy as np
import torch
import torch.nn.functional as F
try:
    import unicore_fused_multi_tensor
    HAS_MULTI_TENSOR = True
except ImportError:
    print("fused_multi_tensor is not installed correctly")
    HAS_MULTI_TENSOR = False
try:
    import unicore_fused_rounding
    HAS_FUSED_ROUNDING = True
except ImportError:
    print("fused_rounding is not installed correctly")
    HAS_FUSED_ROUNDING = False
if not torch.cuda.is_available() or torch.cuda.get_device_capability()[0] < 7:
HAS_MULTI_TENSOR = False
HAS_FUSED_ROUNDING = False
logger = logging.getLogger(__name__)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
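# Compute the total L2 norm of a list of gradients, grouping them by device and
# dtype and using the fused multi-tensor CUDA kernel when it is available.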
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
dtype = grad.dtype
if device not in per_device_grads:
per_device_grads[device] = {}
if dtype not in per_device_grads[device]:
per_device_grads[device][dtype] = []
per_device_grads[device][dtype].append(grad)
for device in per_device_grads.keys():
for dtype in per_device_grads[device].keys():
cur_grads = per_device_grads[device][dtype]
if HAS_MULTI_TENSOR and device.type == "cuda":
norm = unicore_fused_multi_tensor.l2norm(
chunk_size, [cur_grads]
)
norms.append(norm)
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_grads]
total_norm = torch.norm(torch.stack(norms), p=2, dtype=torch.float32)
return total_norm
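# Rescale gradients in place so that their total L2 norm does not exceed max_norm
# (no clipping when max_norm <= 0); aggregate_norm_fn may reduce the norm across
# workers first. Returns the total norm before clipping.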
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
total_norm = multi_tensor_total_norm(grads)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads:
g.mul_(clip_coef)
return total_norm
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path) and not os.path.isfile(os.path.dirname(module_path)):
unicore_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(unicore_rel_path):
module_path = unicore_rel_path
else:
unicore_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(unicore_rel_path):
module_path = unicore_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"tanh",
"linear",
]
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def get_rng_state():
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state):
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
@contextlib.contextmanager
def torch_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
def check_seed(s):
assert type(s) == int or type(s) == np.int32 or type(s) == np.int64
check_seed(seed)
if len(addl_seeds) > 0:
for s in addl_seeds:
check_seed(s)
seed = int(hash((seed, *addl_seeds)) % 1e8)
state = get_rng_state()
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
try:
yield
finally:
set_rng_state(state)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
        Given a list of CudaEnvironments, pretty print them
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def csv_str_list(x):
return x.split(",")
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
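# Apply a list of functions sequentially to `input`; when gradients are enabled,
# each call is wrapped in torch.utils.checkpoint so activations are recomputed
# during the backward pass to save memory.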
def checkpoint_sequential(
functions,
input,
):
def wrap_tuple(a):
return (a,) if type(a) is not tuple else a
def exec(func, a):
return wrap_tuple(func(*a))
def get_wrap_exec(func):
def wrap_exec(*a):
return exec(func, a)
return wrap_exec
input = wrap_tuple(input)
is_grad_enabled = torch.is_grad_enabled()
if is_grad_enabled:
for func in functions:
input = torch.utils.checkpoint.checkpoint(get_wrap_exec(func), *input)
else:
for func in functions:
input = exec(func, input)
return input
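# Permute only the last len(inds) dimensions of a tensor: for a tensor of shape
# [..., a, b, c], permute_final_dims(t, [2, 0, 1]) yields shape [..., c, a, b].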
def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
zero_index = -1 * len(inds)
first_inds = list(range(len(tensor.shape[:zero_index])))
return tensor.permute(first_inds + [zero_index + i for i in inds])
def flatten_final_dims(t: torch.Tensor, num_dims: int):
return t.reshape(t.shape[:-num_dims] + (-1,))
def masked_mean(mask, value, dim, eps=1e-10):
mask = mask.expand(*value.shape)
return torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim))
def dict_multimap(fn, dicts):
first = dicts[0]
new_dict = {}
for k, v in first.items():
all_v = [d[k] for d in dicts]
if type(v) is dict:
new_dict[k] = dict_multimap(fn, all_v)
else:
new_dict[k] = fn(all_v)
return new_dict
def one_hot(x, num_classes, dtype=torch.float32):
x_one_hot = torch.zeros(*x.shape, num_classes, dtype=dtype, device=x.device)
x_one_hot.scatter_(-1, x.long().unsqueeze(-1), 1)
return x_one_hot
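# Gather entries of `data` along dimension `dim` using `inds`, broadcasting over
# the leading `num_batch_dims` batch dimensions.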
def batched_gather(data, inds, dim=0, num_batch_dims=0):
assert dim < 0 or dim - num_batch_dims >= 0
ranges = []
for i, s in enumerate(data.shape[:num_batch_dims]):
r = torch.arange(s)
r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1))))
ranges.append(r)
remaining_dims = [
slice(None) for _ in range(len(data.shape) - num_batch_dims)
]
remaining_dims[dim - num_batch_dims if dim >= 0 else dim] = inds
ranges.extend(remaining_dims)
return data[ranges]
def dict_map(fn, dic, leaf_type):
new_dict = {}
for k, v in dic.items():
if type(v) is dict:
new_dict[k] = dict_map(fn, v, leaf_type)
else:
new_dict[k] = tree_map(fn, v, leaf_type)
return new_dict
def tree_map(fn, tree, leaf_type):
if isinstance(tree, dict):
return dict_map(fn, tree, leaf_type)
elif isinstance(tree, list):
return [tree_map(fn, x, leaf_type) for x in tree]
elif isinstance(tree, tuple):
return tuple([tree_map(fn, x, leaf_type) for x in tree])
elif isinstance(tree, leaf_type):
try:
return fn(tree)
except:
raise ValueError(f"cannot apply {fn} on {tree}.")
else:
raise ValueError(f"{type(tree)} not supported")
tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
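# Copy the float32 tensor `t` into the bfloat16 tensor `o` with stochastic
# rounding: the fused CUDA kernel is used when available, otherwise uniform
# noise scaled to the value's exponent is added before truncating to bfloat16.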
def fp32_to_bf16_sr(t, o):
if HAS_FUSED_ROUNDING and t.device.type == "cuda":
unicore_fused_rounding.fp32_to_bf16_sr(t, o)
else:
r = (torch.rand(size=t.size(), device=t.device, dtype=torch.float32) - 0.5) / 256
m, e = torch.frexp(t)
t = t + torch.ldexp(r, e)
o.data.copy_(t.bfloat16())
def set_jit_fusion_options():
"""Set PyTorch JIT layer fusion options."""
# flags required to enable jit fusion kernels
# legacy pytorch fuser
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
#!/usr/bin/env python3 -u
# Copyright (c) DP Technology.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
import numpy as np
import torch
from unicore import (
checkpoint_utils,
options,
tasks,
utils,
)
from unicore.data import iterators
from unicore.distributed import utils as distributed_utils
from unicore.logging import meters, metrics, progress_bar
from unicore.trainer import Trainer
from multiprocessing.pool import ThreadPool
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("unicore_cli.train")
def main(args) -> None:
utils.import_user_module(args)
utils.set_jit_fusion_options()
    assert (
        args.batch_size is not None
    ), "Must specify batch size with --batch-size"
metrics.reset()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
checkpoint_utils.verify_checkpoint_directory(args.tmp_save_dir)
ckp_copy_thread = ThreadPool(processes=1)
else:
ckp_copy_thread = None
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
assert args.loss, "Please specify loss to train a model"
# Build model and loss
model = task.build_model(args)
loss = task.build_loss(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
logger.info("model: {}".format(model.__class__.__name__))
logger.info("loss: {}".format(loss.__class__.__name__))
logger.info(
"num. model params: {:,} (num. trained: {:,})".format(
sum(getattr(p, "_orig_size", p).numel() for p in model.parameters()),
sum(getattr(p, "_orig_size", p).numel() for p in model.parameters() if p.requires_grad),
)
)
# Build trainer
trainer = Trainer(args, task, model, loss)
logger.info(
"training on {} devices (GPUs)".format(
args.distributed_world_size
)
)
logger.info(
"batch size per device = {}".format(
args.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=False,
)
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while epoch_itr.next_epoch_idx <= max_epoch:
if lr <= args.stop_min_lr:
logger.info(
f"stopping training because current learning rate ({lr}) is smaller "
"than or equal to minimum learning rate "
f"(--stop-min-lr={args.stop_min_lr})"
)
break
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr, ckp_copy_thread)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=False,
)
train_meter.stop()
if ckp_copy_thread is not None:
ckp_copy_thread.close()
ckp_copy_thread.join()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(
args, trainer: Trainer, task: tasks.UnicoreTask, epoch_itr, ckp_copy_thread
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir
if distributed_utils.is_master(args)
else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
logger.info("Start iterating over samples")
max_update = args.max_update or math.inf
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, ckp_copy_thread
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(
args,
trainer: Trainer,
task: tasks.UnicoreTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
ckp_copy_thread,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
# Stopping conditions (and an additional one based on validation loss later
# on)
should_stop = False
if num_updates >= max_update:
should_stop = True
logger.info(
f"Stopping training due to "
f"num_updates: {num_updates} >= max_update: {max_update}"
)
training_time_hours = trainer.cumulative_training_time() / (60 * 60)
if (
args.stop_time_hours > 0
and training_time_hours > args.stop_time_hours
):
should_stop = True
logger.info(
f"Stopping training due to "
f"cumulative_training_time: {training_time_hours} > "
f"stop_time_hours: {args.stop_time_hours} hour(s)"
)
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0 and not args.no_epoch_checkpoints)
or should_stop
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0 and not args.no_epoch_checkpoints)
or should_stop
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
should_stop |= should_stop_early(args, valid_losses[0])
# Save checkpoint
checkpoint_utils.save_checkpoint(
args, trainer, epoch_itr, valid_losses[0], ckp_copy_thread, do_save=(do_save or should_stop),
)
return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(
args,
trainer: Trainer,
task: tasks.UnicoreTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
seed = None
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
seed = args.fixed_validation_seed
with utils.torch_seed(seed):
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(
shuffle=False, set_dataset_epoch=False # use a fixed valid set
)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir
if distributed_utils.is_master(args)
else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
logging_outputs = []
for i, sample in enumerate(progress):
if args.max_valid_steps is not None and i > args.max_valid_steps:
break
inner_logging_outputs = trainer.valid_step(sample)
logging_outputs.extend(inner_logging_outputs)
task.reduce_metrics(logging_outputs, trainer.get_loss(), subset)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
if args.best_checkpoint_metric in stats:
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(
args, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best") and args.best_checkpoint_metric in stats:
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[args.best_checkpoint_metric],
)
return stats
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
# modified from https://github.com/hpcaitech/FastFold/blob/main/benchmark/perf.py
import argparse
import os
import torch
import torch.nn as nn
from fastfold.distributed import init_dap
from fastfold.model.fastnn import Evoformer
def main():
parser = argparse.ArgumentParser(description='Evoformer Standalone Perf Benchmark')
parser.add_argument("--dap-size", default=1, type=int, help='batch size')
parser.add_argument('--batch-size', default=1, type=int, help='batch size')
parser.add_argument('--msa-length', default=128, type=int, help='Sequence Length of MSA')
parser.add_argument('--res-length',
default=256,
type=int,
help='Sequence Length of Residues')
parser.add_argument('--trials', default=50, type=int, help='Number of Trials to Execute')
parser.add_argument('--warmup-trials', default=5, type=int, help='Warmup Trials to discard')
parser.add_argument('--layers',
default=4,
type=int,
help='Evoformer Layers to Execute')
parser.add_argument('--cm', default=256, type=int, help='MSA hidden dimension')
parser.add_argument('--cz', default=128, type=int, help='Pair hidden dimension')
parser.add_argument('--heads', default=8, type=int, help='Number of Multihead Attention heads')
parser.add_argument('--openfold',
action='store_true',
help='Benchmark with Evoformer Implementation from OpenFold.')
parser.add_argument('--openfold-lma',
action='store_true',
help='set use_lma to True in openfold.')
parser.add_argument('--fwd', action='store_true', help='Only execute Fwd Pass.')
args = parser.parse_args()
init_dap(args.dap_size)
precision = torch.bfloat16
if args.dap_size > 1:
# (PyTorch issue) Currently All2All communication does not support the Bfloat16 datatype in PyTorch
precision = torch.float16
if not torch.cuda.is_available():
raise NotImplementedError('Running on CPU is not supported')
torch.manual_seed(42)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(42)
if args.openfold:
from openfold.model.evoformer import EvoformerBlock
class OpenFoldEvoformer(nn.Module):
def __init__(self, d_node, d_pair):
super(OpenFoldEvoformer, self).__init__()
self.d_node = d_node
self.d_pair = d_pair
self.c_hidden_msa_att = int(d_node / 8)
self.c_hidden_pair_att = int(d_pair / 4)
self.EvoformerBlock = EvoformerBlock(c_m=d_node,
c_z=d_pair,
c_hidden_msa_att=self.c_hidden_msa_att,
c_hidden_opm=self.c_hidden_msa_att,
c_hidden_mul=self.d_pair,
c_hidden_pair_att=self.c_hidden_pair_att,
no_heads_msa=8,
no_heads_pair=4,
transition_n=4,
msa_dropout=0.15,
pair_dropout=0.25,
inf=1e9,
eps=1e-10)
def forward(self, node, pair, node_mask, pair_mask):
node, pair = self.EvoformerBlock(node, pair, node_mask, pair_mask, use_lma=args.openfold_lma)
return node, pair
attn_layers = []
for idx in range(0, args.layers):
if args.openfold:
attn_layers.append(OpenFoldEvoformer(d_node=args.cm, d_pair=args.cz))
else:
attn_layers.append(Evoformer(d_node=args.cm, d_pair=args.cz))
attn_layers[idx].cuda()
attn_layers[idx].to(dtype=precision)
start_evt_fwd = []
start_evt_bwd = []
stop_evt_bwd = []
for recorded_trial in range(0, args.trials):
start_evt_fwd.append(torch.cuda.Event(enable_timing=True))
start_evt_bwd.append(torch.cuda.Event(enable_timing=True))
stop_evt_bwd.append(torch.cuda.Event(enable_timing=True))
inputs_node = torch.randn(args.batch_size,
args.msa_length // args.dap_size,
args.res_length,
args.cm,
dtype=precision,
device=torch.device("cuda")).requires_grad_(True)
inputs_pair = torch.randn(args.batch_size,
args.res_length // args.dap_size,
args.res_length,
args.cz,
dtype=precision,
device=torch.device("cuda")).requires_grad_(True)
node_mask = torch.ones((args.batch_size, args.msa_length, args.res_length),
dtype=precision,
device=torch.device("cuda")).requires_grad_(False)
pair_mask = torch.ones((args.batch_size, args.res_length, args.res_length),
dtype=precision,
device=torch.device("cuda")).requires_grad_(False)
total_used_mem_gb = 0
for trial in range(0, args.trials + args.warmup_trials):
layer_inputs = inputs_node, inputs_pair
evt_idx = trial - args.warmup_trials
torch.distributed.barrier()
torch.cuda.synchronize()
torch.cuda.reset_peak_memory_stats()
if evt_idx >= 0:
start_evt_fwd[evt_idx].record()
with torch.set_grad_enabled(not args.fwd):
for lyr_idx in range(0, args.layers):
layer_inputs = attn_layers[lyr_idx].forward(
*layer_inputs,
node_mask,
pair_mask,
)
torch.cuda.synchronize()
if evt_idx >= 0:
start_evt_bwd[evt_idx].record()
if not args.fwd:
s = layer_inputs[0].mean() + layer_inputs[1].mean()
s.backward()
torch.cuda.synchronize()
cur_cost_mem = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
total_used_mem_gb += cur_cost_mem
if evt_idx >= 0:
stop_evt_bwd[evt_idx].record()
torch.cuda.synchronize()
elapsed_time_fwd = 0.0
elapsed_time_bwd = 0.0
for evt_idx in range(0, args.trials):
elapsed_time_fwd += start_evt_fwd[evt_idx].elapsed_time(start_evt_bwd[evt_idx])
elapsed_time_bwd += start_evt_bwd[evt_idx].elapsed_time(stop_evt_bwd[evt_idx])
print(
"Input: {:4d}, {:4d}, {:4d}, ({:4d} {:4d}), Fwd Time / Layer: {:.3f} ms, Bwd Time / Layer: {:.3f} ms, Memory cost {:.3f} GB".format(
args.batch_size,
args.msa_length,
args.res_length,
args.cm,
args.cz,
elapsed_time_fwd / (args.trials * args.layers),
elapsed_time_bwd / (args.trials * args.layers),
total_used_mem_gb / (args.trials + args.warmup_trials),
)
)
if __name__ == '__main__':
main()
import argparse
import os
import torch
import torch.nn as nn
import sys
import pathlib
from tqdm import tqdm
from unifold.modules.evoformer import EvoformerIteration
from unifold.modules.attentions import gen_msa_attn_mask, gen_tri_attn_mask
class WrapEvoformerLayer(nn.Module):
def __init__(self, d_node, d_pair):
super(WrapEvoformerLayer, self).__init__()
self.d_node = d_node
self.d_pair = d_pair
self.c_hidden_msa_att = int(d_node / 8)
self.c_hidden_pair_att = int(d_pair / 4)
self.EvoformerIteration = EvoformerIteration(
d_msa=d_node,
d_pair=d_pair,
d_hid_msa_att=self.c_hidden_msa_att,
d_hid_opm=self.c_hidden_msa_att,
d_hid_mul=self.d_pair,
d_hid_pair_att=self.c_hidden_pair_att,
num_heads_msa=8,
num_heads_pair=4,
transition_n=4,
msa_dropout=0.15,
pair_dropout=0.25,
outer_product_mean_first=False,
inf=3e4,
eps=1e-5,
)
self.alphafold_original_mode()
def alphafold_original_mode(self):
def set_alphafold_original_mode(module):
if hasattr(module, "apply_alphafold_original_mode"):
module.apply_alphafold_original_mode()
if hasattr(module, "act"):
module.act = nn.ReLU()
self.apply(set_alphafold_original_mode)
def forward(
self,
node,
pair,
node_mask,
pair_mask,
msa_row_attn_mask: torch.Tensor,
msa_col_attn_mask: torch.Tensor,
tri_start_attn_mask: torch.Tensor,
tri_end_attn_mask: torch.Tensor,
chunk_size: int,
):
node, pair = self.EvoformerIteration(
node,
pair,
node_mask,
pair_mask,
msa_row_attn_mask,
msa_col_attn_mask,
tri_start_attn_mask,
tri_end_attn_mask,
chunk_size=chunk_size,
)
return node, pair
def main():
parser = argparse.ArgumentParser(description="Evoformer Standalone Perf Benchmark")
parser.add_argument("--batch-size", default=1, type=int, help="batch size")
parser.add_argument(
"--msa-length", default=128, type=int, help="Sequence Length of MSA"
)
parser.add_argument(
"--res-length", default=256, type=int, help="Sequence Length of Residues"
)
parser.add_argument(
"--trials", default=50, type=int, help="Number of Trials to Execute"
)
parser.add_argument(
"--warmup-trials", default=5, type=int, help="Warmup Trials to discard"
)
parser.add_argument(
"--layers", default=4, type=int, help="Evoformer Layers to Execute"
)
parser.add_argument(
"--chunk-size", default=None, type=int, help="Evoformer Layers to Execute"
)
parser.add_argument("--cm", default=256, type=int, help="MSA hidden dimension")
parser.add_argument("--cz", default=128, type=int, help="Pair hidden dimension")
parser.add_argument("--fwd", action="store_true", help="Only execute Fwd Pass.")
parser.add_argument(
"--fp16", action="store_true", help="Use fp16 for benchmark (for V100)"
)
args = parser.parse_args()
precision = torch.bfloat16
if args.fp16:
precision = torch.float16
if not torch.cuda.is_available():
raise NotImplementedError("Running on CPU is not supported")
torch.manual_seed(42)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(42)
attn_layers = []
for idx in range(0, args.layers):
attn_layers.append(WrapEvoformerLayer(d_node=args.cm, d_pair=args.cz))
attn_layers[idx].cuda()
attn_layers[idx].to(dtype=precision)
if args.fwd:
attn_layers[idx].eval()
start_evt_fwd = []
start_evt_bwd = []
stop_evt_bwd = []
for recorded_trial in range(0, args.trials):
start_evt_fwd.append(torch.cuda.Event(enable_timing=True))
start_evt_bwd.append(torch.cuda.Event(enable_timing=True))
stop_evt_bwd.append(torch.cuda.Event(enable_timing=True))
inputs_node = torch.randn(
args.batch_size,
args.msa_length,
args.res_length,
args.cm,
dtype=precision,
device=torch.device("cuda"),
).requires_grad_(True)
inputs_pair = torch.randn(
args.batch_size,
args.res_length,
args.res_length,
args.cz,
dtype=precision,
device=torch.device("cuda"),
).requires_grad_(True)
node_mask = torch.ones(
(args.batch_size, args.msa_length, args.res_length),
dtype=precision,
device=torch.device("cuda"),
).requires_grad_(False)
msa_raw_mask, msa_col_mask = gen_msa_attn_mask(node_mask, 3e4)
pair_mask = torch.ones(
(args.batch_size, args.res_length, args.res_length),
dtype=precision,
device=torch.device("cuda"),
).requires_grad_(False)
tri_start_mask, tri_end_mask = gen_tri_attn_mask(pair_mask, 3e4)
total_used_mem_gb = 0
for trial in range(0, args.trials + args.warmup_trials):
layer_inputs = inputs_node, inputs_pair
evt_idx = trial - args.warmup_trials
torch.cuda.synchronize()
torch.cuda.reset_peak_memory_stats()
if evt_idx >= 0:
start_evt_fwd[evt_idx].record()
with torch.set_grad_enabled(not args.fwd):
for lyr_idx in range(0, args.layers):
layer_inputs = attn_layers[lyr_idx].forward(
*layer_inputs,
node_mask,
pair_mask,
msa_raw_mask,
msa_col_mask,
tri_start_mask,
tri_end_mask,
chunk_size=args.chunk_size,
)
torch.cuda.synchronize()
if evt_idx >= 0:
start_evt_bwd[evt_idx].record()
if not args.fwd:
s = layer_inputs[0].mean() + layer_inputs[1].mean()
s.backward()
torch.cuda.synchronize()
cur_cost_mem = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
total_used_mem_gb += cur_cost_mem
if evt_idx >= 0:
stop_evt_bwd[evt_idx].record()
torch.cuda.synchronize()
elapsed_time_fwd = 0.0
elapsed_time_bwd = 0.0
for evt_idx in range(0, args.trials):
elapsed_time_fwd += start_evt_fwd[evt_idx].elapsed_time(start_evt_bwd[evt_idx])
elapsed_time_bwd += start_evt_bwd[evt_idx].elapsed_time(stop_evt_bwd[evt_idx])
print(
" Input: {:4d}, {:4d}, {:4d}, ({:4d} {:4d}), Fwd Time / Layer: {:.3f} ms, Bwd Time / Layer: {:.3f} ms, Memory cost {:.3f} GB".format(
args.batch_size,
args.msa_length,
args.res_length,
args.cm,
args.cz,
elapsed_time_fwd / (args.trials * args.layers),
elapsed_time_bwd / (args.trials * args.layers),
total_used_mem_gb / (args.trials + args.warmup_trials),
)
)
if __name__ == "__main__":
main()
FROM dptechnology/unicore:latest-pytorch1.11.0-cuda11.3
# metainformation
LABEL org.opencontainers.image.version = "2.0.0"
LABEL org.opencontainers.image.authors = "DP Technology"
LABEL org.opencontainers.image.source = "https://github.com/dptech-corp/Uni-Fold"
LABEL org.opencontainers.image.licenses = "Apache License 2.0"
# Use bash to support string substitution.
SHELL ["/bin/bash", "-c"]
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
hmmer \
kalign
# Compile HHsuite from source.
RUN git clone --branch v3.3.0 https://github.com/soedinglab/hh-suite.git /tmp/hh-suite \
&& mkdir /tmp/hh-suite/build \
&& pushd /tmp/hh-suite/build \
&& cmake -DCMAKE_INSTALL_PREFIX=/opt/hhsuite .. \
&& make -j 4 && make install \
&& ln -s /opt/hhsuite/bin/* /usr/bin \
&& popd \
&& rm -rf /tmp/hh-suite
RUN ldconfig && \
apt-get clean && \
apt-get autoremove && \
rm -rf /var/lib/apt/lists/* /tmp/* && \
conda clean -ya
# Evaluation of Uni-Fold
Uni-Fold is evaluated on recently released Protein Data Bank (PDB) structures. This folder contains the ID of the evaluation set.
[monomer_list.json](./monomer_list.json) contains the IDs of 876 (301 unique) chains we used to evaluate Uni-Fold Monomer. Chains with identical sequences yet (slightly) different structures are grouped together. [multimer_list.json](./multimer_list.json) contains the PDB-IDs of 162 assemblies we used to evaluate Uni-Fold Multimer.
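
The lists can be loaded directly with Python; below is a minimal sketch (assuming both JSON files sit next to this README, as the links above suggest):

```python
import json

# monomer_list.json: a list of groups, each group holding chain IDs
# ("<pdb_id>_<chain_id>") that share the same sequence.
with open("monomer_list.json") as f:
    monomer_groups = json.load(f)

# multimer_list.json: a flat list of PDB IDs of the evaluated assemblies.
with open("multimer_list.json") as f:
    multimer_ids = json.load(f)

num_chains = sum(len(group) for group in monomer_groups)
print(f"{len(monomer_groups)} unique monomer sequences, {num_chains} chains in total")
print(f"{len(multimer_ids)} multimer assemblies")
```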
[
[
"7pkt_k"
],
[
"7tgh_TB"
],
[
"7tgh_FX"
],
[
"7tgh_TA"
],
[
"7pkt_d"
],
[
"7ost_AAA"
],
[
"7o97_B",
"7o97_A"
],
[
"7awv_B",
"7awv_A"
],
[
"7pkt_F"
],
[
"7pb4_K"
],
[
"7tgh_T8"
],
[
"7b5i_CB",
"7b5i_AB",
"7b5i_DB",
"7b5i_EB",
"7b5i_BB",
"7b5i_FB"
],
[
"7ri3_D",
"7ri3_C",
"7ri3_A",
"7ri3_B"
],
[
"7fi3_C",
"7fi3_A",
"7fi3_D",
"7fi3_B"
],
[
"7oio_A"
],
[
"7pyt_B",
"7pxp_C",
"7pxp_G",
"7yxm_D",
"7pxp_D",
"7yxm_B",
"7pxp_H",
"7pyt_D"
],
[
"7s5o_A",
"7s5o_C",
"7s5o_B"
],
[
"7n0j_L",
"7n0j_I",
"7n0j_F",
"7n0j_B",
"7n0j_G",
"7n0j_H",
"7n0j_J",
"7n0j_E",
"7n0j_K",
"7n0j_A",
"7n0j_C",
"7n0j_D"
],
[
"7eey_A",
"7eey_B",
"7eey_C",
"7eey_D"
],
[
"7pkt_E"
],
[
"7trw_A"
],
[
"7tgh_3A",
"7tgh_3a"
],
[
"7acw_A",
"7acw_C"
],
[
"7sxb_A"
],
[
"7qep_L1"
],
[
"7trv_A",
"7trv_C",
"7trv_D",
"7trv_B"
],
[
"7b8b_B",
"7b8b_A"
],
[
"7tgh_TX"
],
[
"7q4i_B",
"7q4i_A"
],
[
"7biz_B",
"7biz_A"
],
[
"7eqb_B",
"7eqb_A"
],
[
"7vw6_B"
],
[
"7r7m_A",
"7r7m_B"
],
[
"7b5i_FA",
"7b5i_AA",
"7b5i_DA",
"7b5i_EA",
"7b5i_BA",
"7b5i_CA"
],
[
"7s0k_A"
],
[
"7ocs_B",
"7ocs_D",
"7ocs_A",
"7ocs_C"
],
[
"7qry_B",
"7qry_A",
"7qry_C",
"7qry_D"
],
[
"7qp9_A",
"7qp9_B"
],
[
"7ews_B",
"7ews_A"
],
[
"7vkb_A",
"7vkc_A"
],
[
"7tgh_T2"
],
[
"7vsp_A",
"7vsp_C",
"7vsp_B"
],
[
"7nqd_A",
"7nqe_A",
"7nqd_B"
],
[
"7wdw_D",
"7wdw_C",
"7wdw_B",
"7wdw_A"
],
[
"7puk_C",
"7puj_A",
"7puk_A"
],
[
"7es4_A"
],
[
"7tj4_C",
"7tj4_A"
],
[
"7pb4_I"
],
[
"7vg5_A",
"7vg4_D",
"7vg4_A",
"7vg4_E",
"7vg5_B",
"7vg4_F",
"7vg4_C",
"7vg4_B"
],
[
"7pb9_A"
],
[
"7qhm_U",
"7qhm_H"
],
[
"7zgh_B",
"7zgh_A",
"7zgg_B",
"7zgg_A",
"7zgf_B",
"7zgf_A"
],
[
"7sa9_A",
"7sa9_B"
],
[
"7vua_A",
"7vua_B"
],
[
"7rc2_A",
"7rc6_A"
],
[
"7fg9_A",
"7fg9_B"
],
[
"7rd0_A"
],
[
"7qep_S7"
],
[
"7sp8_A",
"7sp9_A",
"7spa_A"
],
[
"7z0o_D"
],
[
"7tt9_B",
"7tt9_A",
"7tt9_D",
"7tt9_C"
],
[
"7n3v_A",
"7shw_B",
"7n3v_B",
"7shw_A"
],
[
"7tgh_AN"
],
[
"7ry6_A"
],
[
"7z5h_C",
"7z5g_B",
"7z5g_A",
"7z5g_C",
"7z5h_B",
"7z5g_D",
"7z5h_A",
"7z5h_D"
],
[
"8a3o_B",
"8a3o_A"
],
[
"7twa_D",
"7twa_B",
"7twa_A",
"7twa_C"
],
[
"7q04_E",
"7q05_E",
"7q04_D",
"7q05_D",
"7q05_F",
"7q04_F",
"7q06_E",
"7q06_F",
"7q06_D"
],
[
"7vw6_A"
],
[
"7v9h_A"
],
[
"7rpy_A"
],
[
"7lsv_A",
"7lsv_B"
],
[
"7ept_R"
],
[
"7eqc_E",
"7eqc_J",
"7eqc_F",
"7eqc_I"
],
[
"7rbp_B",
"7rbp_A"
],
[
"7vmt_F",
"7vmt_C",
"7vmt_A",
"7vmt_B",
"7vmt_E",
"7vmt_D"
],
[
"7fc0_B"
],
[
"7euu_B",
"7eut_A",
"7eus_B",
"7euu_A",
"7eut_B",
"7eus_A"
],
[
"7ywf_C",
"7ywe_C",
"7r5z_C",
"7r5z_A",
"7r5z_B",
"7ywf_B",
"7ywe_A",
"7ywe_B",
"7ywf_A"
],
[
"7ero_C",
"7ero_A",
"7ero_D",
"7ern_C",
"7ern_A",
"7ero_B",
"7ern_D",
"7ern_B"
],
[
"7nzz_B",
"7nzz_A"
],
[
"7rkc_A",
"7rkc_B"
],
[
"7u2s_A",
"7u2s_B"
],
[
"7zbo_C",
"7zbo_B",
"7zbo_D",
"7r09_A",
"7zbo_A"
],
[
"7aoo_C",
"7aoo_D",
"7aoo_B",
"7aoo_A",
"7aoj_A"
],
[
"7vo5_A",
"7vo4_A",
"7vo4_B"
],
[
"7sax_D",
"7sax_G",
"7sax_E",
"7sax_C",
"7sax_F"
],
[
"7wvt_A",
"7wwe_A"
],
[
"7q3a_A",
"7q3a_B"
],
[
"7pjo_AAA",
"7pjo_BBB"
],
[
"7tgh_A7"
],
[
"7tgh_A3"
],
[
"7qh2_F",
"7qh2_C"
],
[
"7tn3_A"
],
[
"7w5v_A",
"7w5s_A",
"7w5t_A"
],
[
"7adz_1A",
"7adz_1D",
"7adz_1C",
"7adz_1E",
"7adz_1B",
"7adz_1F"
],
[
"7thw_B",
"7thw_D",
"7thw_A",
"7thw_C"
],
[
"7aeb_A",
"7aef_E",
"7aeb_C",
"7aef_B",
"7aeb_F",
"7aef_D",
"7aef_A",
"7aeb_B",
"7aeb_E",
"7aeb_D",
"7aef_C",
"7aef_F"
],
[
"7pt2_A",
"7pt2_B"
],
[
"7t8l_B",
"7t8k_A",
"7t8l_A",
"7t8k_B"
],
[
"7ewi_C",
"7ewj_A",
"7ewi_B",
"7ewi_A",
"7ewj_D",
"7ewj_H",
"7ewj_B",
"7ewi_D",
"7ewj_E",
"7ewj_G"
],
[
"7nyq_B",
"7nyq_A"
],
[
"7pkt_J"
],
[
"7tb5_A"
],
[
"7xkg_E",
"7xkg_C",
"7xkg_D",
"7xkg_A",
"7xkg_B",
"7xkg_F"
],
[
"7tfm_A"
],
[
"7z0o_E"
],
[
"7p9q_A",
"7p9q_F",
"7p9q_D",
"7p9q_B",
"7p9q_C",
"7p9q_E"
],
[
"7tgh_4L"
],
[
"7ekd_A"
],
[
"7lvf_A"
],
[
"7ot9_A",
"7ot9_C",
"7ot9_B",
"7ot9_E",
"7ot9_D"
],
[
"7ea4_A",
"7ea4_B"
],
[
"7sj2_A",
"7s6n_A",
"7sj2_B",
"7s6n_B"
],
[
"7r5s_U"
],
[
"7pkt_S"
],
[
"7vkb_B"
],
[
"7ly5_B"
],
[
"7z0o_F"
],
[
"7pkt_q"
],
[
"7pp8_F",
"7pp3_A",
"7pp8_A",
"7pp8_B",
"7pu6_B",
"7pu6_E",
"7pp8_E",
"7pp8_C",
"7pp8_G",
"7pu6_A",
"7pp8_D",
"7pu6_D",
"7pp3_D",
"7pu6_C",
"7pu6_F",
"7pp3_B",
"7pu6_H",
"7pp8_H",
"7pp3_C",
"7pu6_G"
],
[
"7pp2_A"
],
[
"7f4p_A",
"7f4n_D",
"7f4n_C",
"7f4l_D",
"7f4l_C"
],
[
"7tgh_AB"
],
[
"7zcl_B"
],
[
"7qyi_A"
],
[
"7ae0_3b",
"7adz_1a",
"7aef_l",
"7ae0_4b",
"7ae0_3d",
"7aef_k",
"7ae0_4d",
"7ae0_4e",
"7aef_m",
"7adz_2e",
"7ae0_3e",
"7ae0_3a",
"7adz_1d",
"7ae0_4f",
"7adz_1c",
"7aeb_k",
"7aeb_n",
"7adz_1e",
"7adz_2d",
"7ae0_5c",
"7ae0_5d",
"7aef_n",
"7ae0_4a",
"7ae0_4c",
"7aeb_p",
"7ae0_3c",
"7adz_1b",
"7aef_p",
"7adz_2a",
"7adz_2f",
"7adz_1f",
"7ae0_5a",
"7ae0_5f",
"7aeb_m",
"7ae0_3f",
"7adz_2b",
"7aeb_o",
"7adz_2c",
"7ae0_5b",
"7aeb_l",
"7aef_o",
"7ae0_5e"
],
[
"7e8r_A",
"7edb_A",
"7edb_B"
],
[
"7qep_MS"
],
[
"7msn_B",
"7msn_A"
],
[
"7rfv_A"
],
[
"7e7h_A",
"7e7h_B"
],
[
"7lxk_A"
],
[
"7r8g_A"
],
[
"7vty_A"
],
[
"7sez_A",
"7sf0_A",
"7t7h_A",
"7t7h_B"
],
[
"7pcs_A",
"7pcs_C"
],
[
"7o16_C",
"7o15_C",
"7o15_B",
"7o14_C",
"7o14_B",
"7o16_B"
],
[
"7xc6_D",
"7xc6_B",
"7xc6_A",
"7xc6_C"
],
[
"8a0k_C",
"8a0k_D",
"8a0k_B",
"8a0k_A"
],
[
"7ewc_D",
"7ewc_A",
"7ewc_B",
"7ewc_C"
],
[
"7pkt_t"
],
[
"7pt5_A"
],
[
"7b5i_FE",
"7b5i_ED",
"7b5i_BD",
"7b5i_DE",
"7b5i_CE",
"7b5i_BE",
"7b5i_AE",
"7b5i_EE",
"7b5i_FD",
"7b5i_CD",
"7b5i_AD",
"7b5i_DD"
],
[
"7ek1_A",
"7ek2_A"
],
[
"7qep_O1"
],
[
"7n1j_B",
"7n1j_D"
],
[
"7r5s_O"
],
[
"7mni_C",
"7mni_A"
],
[
"7vsq_B",
"7vsq_A",
"7vsq_C"
],
[
"7pkt_y"
],
[
"7tgh_B6"
],
[
"7vkz_A",
"7vkz_B"
],
[
"7wwo_B",
"7wwo_A",
"7wwn_A",
"7wrk_A"
],
[
"7fdf_A"
],
[
"7tgh_BL"
],
[
"7tgh_T7"
],
[
"7o5y_C",
"7o5y_B",
"7o5y_A",
"7o5y_D"
],
[
"7r5s_L"
],
[
"7pku_B"
],
[
"7tgh_A9"
],
[
"7txp_A",
"7txs_A",
"7txq_A"
],
[
"7tgh_BM"
],
[
"7f5i_A"
],
[
"7uxg_A"
],
[
"7en5_A",
"7en7_A"
],
[
"7tgh_P1"
],
[
"7ovp_A",
"7ovp_B"
],
[
"7ek3_A"
],
[
"7qoc_A",
"7qod_A",
"7qod_B",
"7qoc_B"
],
[
"7pkt_P"
],
[
"7q21_k",
"7q21_K",
"7qhm_Y",
"7qhm_L"
],
[
"7acx_D",
"7acx_B"
],
[
"7t26_A",
"7t27_A"
],
[
"7tgh_6"
],
[
"7tgh_A6"
],
[
"7rxq_A",
"7rxe_A"
],
[
"7xc6_E"
],
[
"7tgh_A5"
],
[
"7vw0_A",
"7vw0_B"
],
[
"7dz9_C",
"7dz9_D"
],
[
"7mqq_A"
],
[
"7qih_A",
"7qih_C"
],
[
"7r5y_D",
"7r5y_F",
"7r5y_A",
"7r5y_E",
"7r5y_C",
"7r5y_B"
],
[
"7tgh_T3"
],
[
"7os2_C"
],
[
"7e85_B",
"7e85_A",
"7e85_C"
],
[
"7txn_B",
"7txo_A",
"7txn_A",
"7txo_B",
"7txm_A",
"7txm_B"
],
[
"7tgh_S3"
],
[
"7vtg_C",
"7vtg_A",
"7vtg_D",
"7vtg_B"
],
[
"7tgh_3H",
"7tgh_3h"
],
[
"7tgh_T5"
],
[
"7w5m_A"
],
[
"7tgh_S4"
],
[
"7pkt_K"
],
[
"7eg5_B",
"7eg5_A"
],
[
"7sjy_A",
"7sjy_B"
],
[
"7q21_V",
"7q21_v",
"7qhm_K",
"7qhm_X"
],
[
"7aef_r",
"7aef_s",
"7aef_q"
],
[
"7sba_H"
],
[
"7tgh_B4"
],
[
"7wwf_C",
"7wwf_A",
"7wwf_D",
"7wwf_B",
"7wwf_F",
"7wwf_E"
],
[
"7aef_R",
"7aef_Q",
"7aef_M",
"7aeb_N",
"7aef_N",
"7aeb_P",
"7aeb_Q",
"7aef_P",
"7aeb_M",
"7aeb_R",
"7aeb_O",
"7aef_O"
],
[
"7st9_G",
"7stb_G"
],
[
"7x0f_A",
"7x17_C",
"7x0f_D",
"7x17_D",
"7x17_A",
"7x17_B",
"7x0f_B",
"7x0f_C",
"7x0e_A"
],
[
"7mhu_A",
"7mhu_B"
],
[
"7oa8_A"
],
[
"7qs4_A",
"7qs4_C",
"7qs4_B",
"7qs4_D"
],
[
"7qep_C2"
],
[
"7tgh_S5"
],
[
"7rft_A",
"7rft_B"
],
[
"7drh_A"
],
[
"7tgh_C4"
],
[
"7tzv_M",
"7tzv_A"
],
[
"7tvy_C",
"7tvy_D",
"7tvy_B",
"7tvy_A"
],
[
"7vjs_A",
"7vjv_A"
],
[
"7ta5_A"
],
[
"7stv_A",
"7stt_A",
"7stu_A"
],
[
"7pkt_R"
],
[
"7u10_C",
"7x84_B",
"7u13_A",
"7u16_A",
"7u13_C",
"7u13_B",
"7x84_C",
"7u15_B",
"7u15_C",
"7u16_B",
"7u16_C",
"7x84_A",
"7u10_B",
"7u10_A",
"7u15_A"
],
[
"7esi_A"
],
[
"7u18_B",
"7u18_A",
"7u17_B",
"7u17_C",
"7u18_C",
"7u17_A"
],
[
"7qvb_A",
"7qvb_B"
],
[
"7tgh_3b",
"7tgh_3B"
],
[
"7fc0_F",
"7fc0_C"
],
[
"7w6b_A"
],
[
"7nek_A"
],
[
"7dwc_D",
"7dwc_A",
"7dwc_C",
"7dwc_B"
],
[
"7r9b_A"
],
[
"7tgh_T9"
],
[
"7s5l_A"
],
[
"7od9_C",
"7od9_F"
],
[
"7t8i_A",
"7t8i_B"
],
[
"7r5s_I"
],
[
"7mkk_B",
"7mkk_G",
"7mkk_E",
"7mkk_A"
],
[
"8d27_B",
"8d27_A"
],
[
"7qhm_J",
"7qhm_W"
],
[
"7tgh_2B"
],
[
"7tzh_B",
"7tzh_D"
],
[
"7yzu_A",
"7ofy_A",
"7qhv_AAA",
"7ofy_B",
"7nbz_B",
"7nbz_C",
"7nbz_A",
"7yzs_AAA",
"7qhv_BBB"
],
[
"7p37_F",
"7p37_L",
"7p37_G",
"7p37_D",
"7p37_K",
"7p37_H",
"7p37_I",
"7p37_J",
"7p37_B",
"7p37_A",
"7p37_C",
"7p37_E"
],
[
"7ejg_C"
],
[
"7w3r_A",
"7w3r_B"
],
[
"7x8v_A",
"7x8v_D",
"7x8v_F",
"7x8v_E",
"7x8v_C",
"7x8v_B"
],
[
"7p4l_C",
"7p4l_A",
"7p4l_B"
],
[
"7pkt_I"
],
[
"7mvy_B",
"7mvz_B"
],
[
"7pkt_n"
],
[
"7o0x_C2",
"7o0w_C2"
],
[
"7vu7_B",
"7vu7_A"
],
[
"7qep_MD"
],
[
"7poi_D",
"7poi_C"
],
[
"7pkt_L"
],
[
"7pkt_O"
],
[
"7vni_A",
"7vnh_A",
"7vnh_B",
"7vni_B",
"7vna_A"
],
[
"7bgs_B",
"7bgs_A",
"7bnx_A",
"7bnx_B"
],
[
"7bi4_A",
"7bi9_A",
"7bi6_A"
],
[
"7tgh_B3"
],
[
"7tgh_AL"
],
[
"7q21_h",
"7q21_H",
"7qhm_V",
"7qhm_I"
],
[
"7tgh_T4"
],
[
"7vbq_B",
"7vbr_B",
"7vbr_D",
"7vbr_C",
"7vbr_A"
],
[
"7s5c_J",
"7s5c_C",
"7s5c_F",
"7s5c_H",
"7s5c_G",
"7s5c_A",
"7s5c_I",
"7s5c_B",
"7s5c_E",
"7s5c_D"
],
[
"7n40_B"
],
[
"7exo_A",
"7elv_E"
],
[
"7w7b_L",
"7w7b_F",
"7w78_B",
"7w7b_J",
"7w7c_B",
"7w7b_B",
"7w7b_D",
"7w7b_H",
"7w79_B"
],
[
"7mqz_A"
],
[
"7sxi_A"
],
[
"7yxm_A",
"7yxm_C",
"7pyt_C",
"7pxp_B",
"7pxp_F",
"7pxp_E",
"7pyt_A",
"7pxp_A"
],
[
"7tgh_3I",
"7tgh_3i"
],
[
"7v4o_A",
"7v4o_B"
],
[
"7qdv_A",
"7zjc_A"
],
[
"7o15_D",
"7o16_E",
"7o14_D",
"7o16_D",
"7o14_E",
"7o15_E"
],
[
"7n5d_G",
"7n5d_E",
"7n5e_D",
"7n5d_C",
"7n5e_E",
"7n5e_G",
"7n5g_B",
"7n5e_B",
"7n5d_D",
"7n5d_F",
"7n5d_A",
"7n5e_A",
"7n5e_F",
"7n5e_C",
"7n5d_B",
"7n5f_A"
],
[
"7f8m_C",
"7f8m_B",
"7f8m_D",
"7f8m_F",
"7f8m_E",
"7f8m_A"
],
[
"7qs0_C",
"7qs0_B",
"7qs0_A"
],
[
"7tgh_2"
],
[
"7pkt_B"
],
[
"7tgh_T1"
],
[
"7naz_A"
],
[
"7f28_C",
"7f28_A"
],
[
"7pd2_A",
"7pd2_B",
"7pd1_B",
"7pd1_A"
],
[
"7pkt_M"
],
[
"7epq_A",
"7epq_B"
],
[
"7sgn_L",
"7sgn_A",
"7sgn_G",
"7sgo_A",
"7sgo_D",
"7sgn_F",
"7sgn_J",
"7sgn_I",
"7sgo_C",
"7sgn_B",
"7sgo_B",
"7sgn_C",
"7sgn_E",
"7sgo_F",
"7sgn_K",
"7sgp_A",
"7sgn_H",
"7sgo_E",
"7sgn_D"
],
[
"7vgm_A"
],
[
"7pkt_G"
],
[
"7pze_B",
"7pze_A"
],
[
"7tgh_AM"
],
[
"7nde_A"
],
[
"7qzq_A",
"7qzq_B"
],
[
"7pwo_M2",
"7pwg_M"
],
[
"7qhy_A",
"7qhy_B"
],
[
"7v09_B",
"7v09_A"
],
[
"7tgh_3J",
"7tgh_3j"
],
[
"7tgh_B8"
],
[
"7tgh_5"
],
[
"7ste_A",
"7stb_A",
"7st9_A"
],
[
"7adz_0C",
"7adz_0D",
"7adz_0F",
"7adz_0B",
"7adz_0E",
"7adz_0A"
],
[
"7tem_B",
"7tem_A"
],
[
"7tgh_B9"
]
]
[
"7zjc",
"7tt9",
"7rfv",
"7trw",
"7mx1",
"7nto",
"7tn8",
"7f74",
"7awv",
"7f8f",
"7tpg",
"7eg5",
"8a3o",
"7toc",
"7pp2",
"7qcs",
"7fdf",
"7pc5",
"7eni",
"7txs",
"7s4g",
"7wwn",
"7rkc",
"7vmc",
"7msn",
"7qqn",
"7lo8",
"7mkv",
"7qoe",
"7r1c",
"7b1d",
"7pc7",
"7tj4",
"7vbq",
"7tuz",
"7pc8",
"7ewi",
"7fc0",
"7ne9",
"7tze",
"7esi",
"7dz9",
"7tae",
"7e11",
"7wze",
"7pb4",
"7o1f",
"7yxw",
"7x87",
"7fbl",
"7e12",
"7vtf",
"7rcz",
"7bgs",
"7ly6",
"7obv",
"7eqb",
"7ob7",
"7obd",
"7pc3",
"7p8x",
"7rrg",
"7rej",
"7u13",
"7dtr",
"7vcr",
"7n1j",
"7epq",
"7ewe",
"7tj1",
"7qoc",
"7l1b",
"7zb0",
"7qnq",
"7wa4",
"7rbp",
"7eid",
"7aoo",
"7wwf",
"7t8i",
"7vw0",
"7vuf",
"8d27",
"7qqy",
"7eru",
"7e85",
"7f4s",
"7zcv",
"7en6",
"7tzv",
"7u8t",
"7yxm",
"7txm",
"7stv",
"7t6t",
"7qpc",
"7u2s",
"7ei2",
"7ews",
"7xkg",
"7eey",
"7mq2",
"7tjl",
"7qih",
"7f8m",
"7ero",
"7t6u",
"7wq3",
"7a67",
"7acz",
"7ovp",
"7f7i",
"7v4m",
"7qii",
"7vwk",
"7f4n",
"7wsj",
"7wq4",
"7tb6",
"7fga",
"7ejx",
"7vo4",
"7pvh",
"7r9b",
"7p6f",
"7wdw",
"7sf8",
"7n29",
"7r09",
"7vg5",
"7vkb",
"7vu7",
"7od9",
"7o97",
"7ywe",
"7oct",
"7oh2",
"7r7m",
"7p1c",
"7vni",
"7sj2",
"7xeb",
"7e8r",
"7tcu",
"7e0z",
"7aam",
"7qvb",
"7q4i",
"7qwg",
"7acw",
"7eew",
"7aan",
"7eys",
"7vvv",
"7ep7",
"7ewj",
"7sp6",
"7vb4",
"7r5y",
"7trv",
"7mk3",
"7twa"
]
{
"8d27_A": [
"8d27_A"
]
}
{
"8d27_A": 1.0
}
["I", "I"]
{
"8d27": {
"symbol": "C2",
"stoi": [
"A2"
],
"chains": [
"A",
"B"
],
"opers": [
"I",
"I"
]
}
}