Unverified Commit b76ac11c authored by Mufei Li's avatar Mufei Li Committed by GitHub
Browse files

[DGL-LifeSci] Release Preparation (CI, Docker, Conda build) (#1399)



* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* add docs

* Fix style

* Fix lint

* Bug fix

* Fix test

* Update

* Update

* Update

* Update
Co-authored-by: default avatarMinjie Wang <wmjlyjemaine@gmail.com>
parent e4cc8185
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the in-repo copy of the package importable so autodoc documents the
# checked-out sources rather than any installed version.
sys.path.insert(0, os.path.abspath('../../python'))
# -- Project information -----------------------------------------------------
project = 'DGL-LifeSci'
copyright = '2020, DGL Team'
author = 'DGL Team'
# Imported here, after the sys.path tweak above, so the local package is found.
import dgllife
# Keep the short X.Y "version" and the full "release" in lockstep with the
# package's own version string.
version = dgllife.__version__
release = dgllife.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules to enable. All but the last ship with Sphinx
# itself; sphinx_gallery renders the example gallery pages.
_sphinx_builtin = [
    'autodoc',
    'autosummary',
    'coverage',
    'mathjax',
    'napoleon',
    'viewcode',
    'intersphinx',
    'graphviz',
]
extensions = ['sphinx.ext.' + _ext for _ext in _sphinx_builtin]
extensions.append('sphinx_gallery.gen_gallery')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and falls back to 'en';
# confirm the pinned Sphinx version before changing this.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
# None selects the theme's default style.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Requires the sphinx_rtd_theme package to be installed at build time.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'dgllifedoc'
# -- Options for LaTeX output ------------------------------------------------
# All entries are commented out, so the Sphinx LaTeX defaults apply.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'dgllife.tex', 'DGL-LifeSci Documentation',
     'DGL Team', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 is the conventional man section for user commands.
man_pages = [
    (master_doc, 'dgllife', 'DGL-LifeSci Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'dgllife', 'DGL-LifeSci Documentation',
     author, 'dgllife',
     # Description taken from the package docstring; this field previously
     # contained the unfilled placeholder 'XXXXXXXXXXXXXXXX'.
     'DGL-based package for applications in life science.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
# search.html depends on JavaScript, which epub readers may not support.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Generate autosummary stub pages automatically at build time.
autosummary_generate = True
# Cross-project link targets for sphinx.ext.intersphinx. URLs use HTTPS:
# the previous plain-HTTP forms only worked via server-side redirects and
# trip HTTPS-only link checkers. The Python entry tracks the major version
# of the interpreter running the build.
intersphinx_mapping = {
    'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('https://matplotlib.org/', None),
    'networkx': ('https://networkx.github.io/documentation/stable', None),
}
# sphinx gallery configurations
from sphinx_gallery.sorting import FileNameSortKey
# Both directory lists are empty, so the gallery is effectively disabled
# until example sources are registered here.
examples_dirs = [] # path to find sources
gallery_dirs = [] # path to generate docs
# NOTE(review): reference_url is defined but never placed into
# sphinx_gallery_conf below — confirm whether it should be added under the
# 'reference_url' key or removed.
reference_url = {
    'dgllife' : None,
    'numpy': 'http://docs.scipy.org/doc/numpy/',
    'scipy': 'http://docs.scipy.org/doc/scipy/reference',
    'matplotlib': 'http://matplotlib.org/',
    'networkx' : 'https://networkx.github.io/documentation/stable',
}
sphinx_gallery_conf = {
    # Where mini-galleries of backreferences to API entries are written.
    'backreferences_dir' : 'generated/backreferences',
    # Modules whose documented objects get example backreferences.
    'doc_module' : ('dgllife', 'numpy'),
    'examples_dirs' : examples_dirs,
    'gallery_dirs' : gallery_dirs,
    # Order examples within a subsection by file name.
    'within_subsection_order' : FileNameSortKey,
    # Execute every .py example when building the gallery.
    'filename_pattern' : '.py',
    'download_all_examples' : False,
}
DGL-LifeSci: A GNN Package for Chemistry and Molecular Applications
===========================================================================================
DGL-LifeSci is a DGL-based package for applications in life science, providing data processing utilities, model zoos and training utilities for molecular machine learning.
Get Started
------------
Content for this section can be adapted from the project README.
API Reference
---------------
The highest-level breakdown of the available APIs and what each module is for.
.. toctree::
:maxdepth: 1
:caption: Get Started
:hidden:
:glob:
get_started
.. toctree::
:maxdepth: 2
:caption: API Reference
:hidden:
:glob:
api/data
api/model
api/model.gnn
api/model.zoo
api/model.readout
api/utils
Index
-----
* :ref:`genindex`
__version__ = '0.2.0' """DGL-based package for applications in life science."""
from . import data
from . import model
from . import utils
from .libinfo import __version__
"""Information for the library."""
__version__ = '0.2.0'
"""AttentiveFP""" """AttentiveFP"""
# pylint: disable= no-member, arguments-differ, invalid-name
import dgl.function as fn import dgl.function as fn
import torch import torch
import torch.nn as nn import torch.nn as nn
...@@ -8,6 +9,7 @@ from dgl.nn.pytorch import edge_softmax ...@@ -8,6 +9,7 @@ from dgl.nn.pytorch import edge_softmax
__all__ = ['AttentiveFPGNN'] __all__ = ['AttentiveFPGNN']
# pylint: disable=W0221, C0103, E1101
class AttentiveGRU1(nn.Module): class AttentiveGRU1(nn.Module):
"""Update node features with attention and GRU. """Update node features with attention and GRU.
...@@ -298,7 +300,7 @@ class AttentiveFPGNN(nn.Module): ...@@ -298,7 +300,7 @@ class AttentiveFPGNN(nn.Module):
self.init_context = GetContext(node_feat_size, edge_feat_size, graph_feat_size, dropout) self.init_context = GetContext(node_feat_size, edge_feat_size, graph_feat_size, dropout)
self.gnn_layers = nn.ModuleList() self.gnn_layers = nn.ModuleList()
for i in range(num_layers - 1): for _ in range(num_layers - 1):
self.gnn_layers.append(GNNLayer(graph_feat_size, graph_feat_size, dropout)) self.gnn_layers.append(GNNLayer(graph_feat_size, graph_feat_size, dropout))
def forward(self, g, node_feats, edge_feats): def forward(self, g, node_feats, edge_feats):
......
"""Graph Attention Networks""" """Graph Attention Networks"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
...@@ -6,6 +7,7 @@ from dgl.nn.pytorch import GATConv ...@@ -6,6 +7,7 @@ from dgl.nn.pytorch import GATConv
__all__ = ['GAT'] __all__ = ['GAT']
# pylint: disable=W0221
class GATLayer(nn.Module): class GATLayer(nn.Module):
r"""Single GAT layer from `Graph Attention Networks <https://arxiv.org/abs/1710.10903>`__ r"""Single GAT layer from `Graph Attention Networks <https://arxiv.org/abs/1710.10903>`__
......
"""Graph Convolutional Networks.""" """Graph Convolutional Networks."""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
...@@ -6,6 +7,7 @@ from dgl.nn.pytorch import GraphConv ...@@ -6,6 +7,7 @@ from dgl.nn.pytorch import GraphConv
__all__ = ['GCN'] __all__ = ['GCN']
# pylint: disable=W0221, C0103
class GCNLayer(nn.Module): class GCNLayer(nn.Module):
r"""Single GCN layer from `Semi-Supervised Classification with Graph Convolutional Networks r"""Single GCN layer from `Semi-Supervised Classification with Graph Convolutional Networks
<https://arxiv.org/abs/1609.02907>`__ <https://arxiv.org/abs/1609.02907>`__
...@@ -33,7 +35,7 @@ class GCNLayer(nn.Module): ...@@ -33,7 +35,7 @@ class GCNLayer(nn.Module):
self.activation = activation self.activation = activation
self.graph_conv = GraphConv(in_feats=in_feats, out_feats=out_feats, self.graph_conv = GraphConv(in_feats=in_feats, out_feats=out_feats,
norm=False, activation=activation) norm='none', activation=activation)
self.dropout = nn.Dropout(dropout) self.dropout = nn.Dropout(dropout)
self.residual = residual self.residual = residual
......
"""MGCN""" """MGCN"""
# pylint: disable= no-member, arguments-differ, invalid-name
import dgl.function as fn import dgl.function as fn
import torch import torch
import torch.nn as nn import torch.nn as nn
...@@ -7,6 +8,7 @@ from .schnet import RBFExpansion ...@@ -7,6 +8,7 @@ from .schnet import RBFExpansion
__all__ = ['MGCNGNN'] __all__ = ['MGCNGNN']
# pylint: disable=W0221, E1101
class EdgeEmbedding(nn.Module): class EdgeEmbedding(nn.Module):
"""Module for embedding edges. """Module for embedding edges.
...@@ -232,7 +234,7 @@ class MGCNGNN(nn.Module): ...@@ -232,7 +234,7 @@ class MGCNGNN(nn.Module):
self.rbf = RBFExpansion(high=cutoff, gap=gap) self.rbf = RBFExpansion(high=cutoff, gap=gap)
self.gnn_layers = nn.ModuleList() self.gnn_layers = nn.ModuleList()
for i in range(n_layers): for _ in range(n_layers):
self.gnn_layers.append(MultiLevelInteraction(feats, len(self.rbf.centers))) self.gnn_layers.append(MultiLevelInteraction(feats, len(self.rbf.centers)))
def forward(self, g, node_types, edge_dists): def forward(self, g, node_types, edge_dists):
......
"""MPNN""" """MPNN"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
...@@ -6,6 +7,7 @@ from dgl.nn.pytorch import NNConv ...@@ -6,6 +7,7 @@ from dgl.nn.pytorch import NNConv
__all__ = ['MPNNGNN'] __all__ = ['MPNNGNN']
# pylint: disable=W0221
class MPNNGNN(nn.Module): class MPNNGNN(nn.Module):
"""MPNN. """MPNN.
...@@ -69,7 +71,7 @@ class MPNNGNN(nn.Module): ...@@ -69,7 +71,7 @@ class MPNNGNN(nn.Module):
node_feats = self.project_node_feats(node_feats) # (V, node_out_feats) node_feats = self.project_node_feats(node_feats) # (V, node_out_feats)
hidden_feats = node_feats.unsqueeze(0) # (1, V, node_out_feats) hidden_feats = node_feats.unsqueeze(0) # (1, V, node_out_feats)
for i in range(self.num_step_message_passing): for _ in range(self.num_step_message_passing):
node_feats = F.relu(self.gnn_layer(g, node_feats, edge_feats)) node_feats = F.relu(self.gnn_layer(g, node_feats, edge_feats))
node_feats, hidden_feats = self.gru(node_feats.unsqueeze(0), hidden_feats) node_feats, hidden_feats = self.gru(node_feats.unsqueeze(0), hidden_feats)
node_feats = node_feats.squeeze(0) node_feats = node_feats.squeeze(0)
......
# -*- coding:utf-8 -*- # -*- coding:utf-8 -*-
# pylint: disable=C0103, C0111, W0621 # pylint: disable=C0103, C0111, W0621, W0221, E1102, E1101
"""SchNet""" """SchNet"""
import numpy as np import numpy as np
import torch import torch
...@@ -10,7 +10,7 @@ from dgl.nn.pytorch import CFConv ...@@ -10,7 +10,7 @@ from dgl.nn.pytorch import CFConv
__all__ = ['SchNetGNN'] __all__ = ['SchNetGNN']
class RBFExpansion(nn.Module): class RBFExpansion(nn.Module):
"""Expand distances between nodes by radial basis functions. r"""Expand distances between nodes by radial basis functions.
.. math:: .. math::
\exp(- \gamma * ||d - \mu||^2) \exp(- \gamma * ||d - \mu||^2)
......
"""WLN""" """WLN"""
import dgl.function as fn # pylint: disable= no-member, arguments-differ, invalid-name
import math import math
import dgl.function as fn
import torch import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
...@@ -10,7 +11,7 @@ from torch.nn import Parameter ...@@ -10,7 +11,7 @@ from torch.nn import Parameter
__all__ = ['WLN'] __all__ = ['WLN']
class WLNLinear(nn.Module): class WLNLinear(nn.Module):
"""Linear layer for WLN r"""Linear layer for WLN
Let stddev be Let stddev be
...@@ -50,12 +51,12 @@ class WLNLinear(nn.Module): ...@@ -50,12 +51,12 @@ class WLNLinear(nn.Module):
bound = 1 / math.sqrt(fan_in) bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound) nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input): def forward(self, feats):
"""Applies the layer. """Applies the layer.
Parameters Parameters
---------- ----------
input : float32 tensor of shape (N, *, in_feats) feats : float32 tensor of shape (N, *, in_feats)
N for the number of samples, * for any additional dimensions. N for the number of samples, * for any additional dimensions.
Returns Returns
...@@ -63,7 +64,7 @@ class WLNLinear(nn.Module): ...@@ -63,7 +64,7 @@ class WLNLinear(nn.Module):
float32 tensor of shape (N, *, out_feats) float32 tensor of shape (N, *, out_feats)
Result of the layer. Result of the layer.
""" """
return F.linear(input, self.weight, self.bias) return F.linear(feats, self.weight, self.bias)
def extra_repr(self): def extra_repr(self):
"""Return a description of the layer.""" """Return a description of the layer."""
...@@ -133,7 +134,7 @@ class WLN(nn.Module): ...@@ -133,7 +134,7 @@ class WLN(nn.Module):
Updated node representations. Updated node representations.
""" """
node_feats = self.project_node_in_feats(node_feats) node_feats = self.project_node_in_feats(node_feats)
for l in range(self.n_layers): for _ in range(self.n_layers):
g = g.local_var() g = g.local_var()
g.ndata['hv'] = node_feats g.ndata['hv'] = node_feats
g.apply_edges(fn.copy_src('hv', 'he_src')) g.apply_edges(fn.copy_src('hv', 'he_src'))
......
"""Atomic Convolutional Networks for Predicting Protein-Ligand Binding Affinity""" """Atomic Convolutional Networks for Predicting Protein-Ligand Binding Affinity"""
# pylint: disable=C0103, C0123 # pylint: disable=C0103, C0123, W0221, E1101, R1721
import itertools import itertools
import numpy as np import numpy as np
import torch import torch
......
"""AttentiveFP""" """AttentiveFP"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn import torch.nn as nn
from ..gnn import AttentiveFPGNN from ..gnn import AttentiveFPGNN
...@@ -6,6 +7,7 @@ from ..readout import AttentiveFPReadout ...@@ -6,6 +7,7 @@ from ..readout import AttentiveFPReadout
__all__ = ['AttentiveFPPredictor'] __all__ = ['AttentiveFPPredictor']
# pylint: disable=W0221
class AttentiveFPPredictor(nn.Module): class AttentiveFPPredictor(nn.Module):
"""AttentiveFP for regression and classification on graphs. """AttentiveFP for regression and classification on graphs.
......
# pylint: disable=C0103, W0622, R1710, W0104 # pylint: disable=C0103, W0622, R1710, W0104, E1101, W0221, C0411
""" """
Learning Deep Generative Models of Graphs Learning Deep Generative Models of Graphs
https://arxiv.org/pdf/1803.03324.pdf https://arxiv.org/pdf/1803.03324.pdf
......
"""GAT-based model for regression and classification on graphs.""" """GAT-based model for regression and classification on graphs."""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F
from .mlp_predictor import MLPPredictor from .mlp_predictor import MLPPredictor
from ..gnn.gat import GAT from ..gnn.gat import GAT
from ..readout.weighted_sum_and_max import WeightedSumAndMax from ..readout.weighted_sum_and_max import WeightedSumAndMax
# pylint: disable=W0221
class GATPredictor(nn.Module): class GATPredictor(nn.Module):
r"""GAT-based model for regression and classification on graphs. r"""GAT-based model for regression and classification on graphs.
...@@ -65,8 +66,8 @@ class GATPredictor(nn.Module): ...@@ -65,8 +66,8 @@ class GATPredictor(nn.Module):
n_tasks : int n_tasks : int
Number of tasks, which is also the output size. Default to 1. Number of tasks, which is also the output size. Default to 1.
""" """
def __init__(self, in_feats, hidden_feats=None, num_heads=None, feat_drops=None, attn_drops=None, def __init__(self, in_feats, hidden_feats=None, num_heads=None, feat_drops=None,
alphas=None, residuals=None, agg_modes=None, activations=None, attn_drops=None, alphas=None, residuals=None, agg_modes=None, activations=None,
classifier_hidden_feats=128, classifier_dropout=0., n_tasks=1): classifier_hidden_feats=128, classifier_dropout=0., n_tasks=1):
super(GATPredictor, self).__init__() super(GATPredictor, self).__init__()
......
"""GCN-based model for regression and classification on graphs.""" """GCN-based model for regression and classification on graphs."""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch.nn as nn import torch.nn as nn
from .mlp_predictor import MLPPredictor from .mlp_predictor import MLPPredictor
from ..gnn.gcn import GCN from ..gnn.gcn import GCN
from ..readout.weighted_sum_and_max import WeightedSumAndMax from ..readout.weighted_sum_and_max import WeightedSumAndMax
# pylint: disable=W0221
class GCNPredictor(nn.Module): class GCNPredictor(nn.Module):
"""GCN-based model for regression and classification on graphs. """GCN-based model for regression and classification on graphs.
......
# pylint: disable=C0111, C0103, E1101, W0611, W0612, W0703, C0200, R1710 # pylint: disable=C0111, C0103, E1101, W0611, W0612, W0703, C0200, R1710, I1101, R1721
import rdkit.Chem as Chem
from collections import defaultdict from collections import defaultdict
import rdkit.Chem as Chem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers
from scipy.sparse import csr_matrix from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree from scipy.sparse.csgraph import minimum_spanning_tree
......
# pylint: disable=C0111, C0103, E1101, W0611, W0612, W1508 # pylint: disable=C0111, C0103, E1101, W0611, W0612, W1508, I1101, W0221
# pylint: disable=redefined-outer-name # pylint: disable=redefined-outer-name
import os import os
import rdkit.Chem as Chem import rdkit.Chem as Chem
......
# pylint: disable=C0111, C0103, E1101, W0611, W0612 # pylint: disable=C0111, C0103, E1101, W0611, W0612, W0221, E1102
import numpy as np import numpy as np
import torch import torch
import torch.nn as nn import torch.nn as nn
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment