"src/vscode:/vscode.git/clone" did not exist on "b8f905f18b29cdcaba2a4758ac29bf44b9b7dff9"
Unverified Commit b76ac11c authored by Mufei Li's avatar Mufei Li Committed by GitHub
Browse files

[DGL-LifeSci] Release Preparation (CI, Docker, Conda build) (#1399)



* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* add docs

* Fix style

* Fix lint

* Bug fix

* Fix test

* Update

* Update

* Update

* Update
Co-authored-by: default avatarMinjie Wang <wmjlyjemaine@gmail.com>
parent e4cc8185
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import dgllife import os
import sys
from setuptools import find_packages from setuptools import find_packages
from setuptools import setup
if '--inplace' in sys.argv: CURRENT_DIR = os.path.dirname(__file__)
from distutils.core import setup
else: def get_lib_path():
from setuptools import setup """Get library path, name and version"""
# We can not import `libinfo.py` in setup.py directly since __init__.py
# Will be invoked which introduces dependences
libinfo_py = os.path.join(CURRENT_DIR, './dgllife/libinfo.py')
libinfo = {'__file__': libinfo_py}
exec(compile(open(libinfo_py, "rb").read(), libinfo_py, 'exec'), libinfo, libinfo)
version = libinfo['__version__']
return version
VERSION = get_lib_path()
setup( setup(
name='dgllife', name='dgllife',
version=dgllife.__version__, version=VERSION,
description='DGL-based package for Life Science', description='DGL-based package for Life Science',
keywords=[ keywords=[
'pytorch', 'pytorch',
...@@ -26,9 +36,9 @@ setup( ...@@ -26,9 +36,9 @@ setup(
packages=[package for package in find_packages() packages=[package for package in find_packages()
if package.startswith('dgllife')], if package.startswith('dgllife')],
install_requires=[ install_requires=[
'torch>=1' 'torch>=1.1'
'scikit-learn>=0.22.2', 'scikit-learn>=0.22.2',
'pandas>=0.25.1', 'pandas>=0.24.2',
'requests>=2.22.0', 'requests>=2.22.0',
'tqdm' 'tqdm'
], ],
...@@ -36,5 +46,7 @@ setup( ...@@ -36,5 +46,7 @@ setup(
classifiers=[ classifiers=[
'Development Status :: 3 - Alpha', 'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License'
], ],
license='APACHE'
) )
"""
This is the global script that set the version information of DGL-LifeSci.
This script runs and update all the locations that related to versions
List of affected files:
- app-root/python/dgllife/__init__.py
- app-root/conda/dgllife/meta.yaml
"""
import os
import re
__version__ = "0.2.0"
print(__version__)
# Implementations
def update(file_name, pattern, repl):
    """Replace a single version occurrence in ``file_name`` in place.

    Scans the file line by line for ``pattern`` (a regex expected to match
    exactly once in the whole file) and substitutes the match with ``repl``.
    The file is rewritten only when the stored version actually differs.

    Parameters
    ----------
    file_name : str
        Path of the file to update.
    pattern : str
        Regular expression locating the current version string.
    repl : str
        New version string.

    Raises
    ------
    RuntimeError
        If ``pattern`` does not match exactly one line of the file.
    """
    # Renamed from `update` — the original local shadowed the function name.
    new_lines = []
    hit_counter = 0
    need_update = False
    # `with` guarantees the read handle is closed (the original leaked it).
    with open(file_name) as in_file:
        for line in in_file:
            result = re.findall(pattern, line)
            if result:
                assert len(result) == 1
                hit_counter += 1
                if result[0] != repl:
                    line = re.sub(pattern, repl, line)
                    need_update = True
                    print("%s: %s->%s" % (file_name, result[0], repl))
                else:
                    print("%s: version is already %s" % (file_name, repl))
            new_lines.append(line)
    if hit_counter != 1:
        raise RuntimeError("Cannot find version in %s" % file_name)
    if need_update:
        with open(file_name, "w") as output_file:
            output_file.writelines(new_lines)
def main():
    """Propagate ``__version__`` into every file that stores the version."""
    curr_dir = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_dir, ".."))
    # python package version
    update(os.path.join(proj_root, "python/dgllife/libinfo.py"),
           r"(?<=__version__ = \")[.0-9a-z]+", __version__)
    # conda recipe version — raw string for consistency with the pattern
    # above (the original relied on "\"" collapsing to '"' by accident)
    update(os.path.join(proj_root, "conda/dgllife/meta.yaml"),
           r"(?<=version: \")[.0-9a-z]+", __version__)
# Script entry point: rewrite all version locations when run directly.
if __name__ == '__main__':
    main()
[MASTER]
# Adapted from github.com/dmlc/dgl/tests/lint/pylintrc
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,_cy2,_cy3,backend,data,contrib
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=4
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Specify a configuration file.
#rcfile=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=design,
similarities,
no-self-use,
attribute-defined-outside-init,
locally-disabled,
star-args,
pointless-except,
bad-option-value,
global-statement,
fixme,
suppressed-message,
useless-suppression,
locally-enabled,
import-error,
unsubscriptable-object,
unbalanced-tuple-unpacking,
protected-access,
useless-object-inheritance,
no-else-return,
len-as-condition,
cyclic-import, # disabled due to the inevitable dgl.graph -> dgl.subgraph loop
undefined-variable, # disabled due to C extension (should enable)
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[REPORTS]
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables 'error', 'warning', 'refactor',
# 'convention' and 'statement', which respectively contain the number of
# messages in each category and the total number of statements analyzed.
# This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
#msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
output-format=text
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style.
#class-attribute-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,j,k,u,v,e,n,m,w,x,y,g,G,hg,fn,ex,Run,_
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style.
#variable-rgx=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored. Default to name
# with leading underscore.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it working
# install the python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[LOGGING]
# Format style used to check logging format string. `old` means using %
# formatting, while `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=100
# Maximum number of lines in a module.
max-module-lines=4000
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,
dict-separator
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[SIMILARITIES]
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
# Minimum lines number of a similarity.
min-similarity-lines=4
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=dgl.backend,dgl._api_internal
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
[IMPORTS]
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=yes
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=optparse,tkinter.tix
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled).
ext-import-graph=
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled).
import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
[DESIGN]
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement.
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception".
overgeneral-exceptions=Exception
\ No newline at end of file
...@@ -81,7 +81,7 @@ def test_gat(): ...@@ -81,7 +81,7 @@ def test_gat():
bg, batch_node_feats = bg.to(device), batch_node_feats.to(device) bg, batch_node_feats = bg.to(device), batch_node_feats.to(device)
# Test default setting # Test default setting
gnn = GAT(in_feats=1) gnn = GAT(in_feats=1).to(device)
assert gnn(g, node_feats).shape == torch.Size([3, 32]) assert gnn(g, node_feats).shape == torch.Size([3, 32])
assert gnn(bg, batch_node_feats).shape == torch.Size([8, 32]) assert gnn(bg, batch_node_feats).shape == torch.Size([8, 32])
...@@ -196,7 +196,7 @@ def test_mpnn_gnn(): ...@@ -196,7 +196,7 @@ def test_mpnn_gnn():
# Test default setting # Test default setting
gnn = MPNNGNN(node_in_feats=1, gnn = MPNNGNN(node_in_feats=1,
edge_in_feats=2) edge_in_feats=2).to(device)
assert gnn(g, node_feats, edge_feats).shape == torch.Size([3, 64]) assert gnn(g, node_feats, edge_feats).shape == torch.Size([3, 64])
assert gnn(bg, batch_node_feats, batch_edge_feats).shape == torch.Size([8, 64]) assert gnn(bg, batch_node_feats, batch_edge_feats).shape == torch.Size([8, 64])
...@@ -223,7 +223,7 @@ def test_wln(): ...@@ -223,7 +223,7 @@ def test_wln():
# Test default setting # Test default setting
gnn = WLN(node_in_feats=1, gnn = WLN(node_in_feats=1,
edge_in_feats=2) edge_in_feats=2).to(device)
assert gnn(g, node_feats, edge_feats).shape == torch.Size([3, 300]) assert gnn(g, node_feats, edge_feats).shape == torch.Size([3, 300])
assert gnn(bg, batch_node_feats, batch_edge_feats).shape == torch.Size([8, 300]) assert gnn(bg, batch_node_feats, batch_edge_feats).shape == torch.Size([8, 300])
...@@ -231,7 +231,7 @@ def test_wln(): ...@@ -231,7 +231,7 @@ def test_wln():
gnn = WLN(node_in_feats=1, gnn = WLN(node_in_feats=1,
edge_in_feats=2, edge_in_feats=2,
node_out_feats=3, node_out_feats=3,
n_layers=1) n_layers=1).to(device)
assert gnn(g, node_feats, edge_feats).shape == torch.Size([3, 3]) assert gnn(g, node_feats, edge_feats).shape == torch.Size([3, 3])
assert gnn(bg, batch_node_feats, batch_edge_feats).shape == torch.Size([8, 3]) assert gnn(bg, batch_node_feats, batch_edge_feats).shape == torch.Size([8, 3])
......
import dgl import dgl
import torch import torch
import torch.nn.functional as F
from dgl import DGLGraph from dgl import DGLGraph
...@@ -65,7 +66,7 @@ def test_gcn_predictor(): ...@@ -65,7 +66,7 @@ def test_gcn_predictor():
bg, batch_node_feats = bg.to(device), batch_node_feats.to(device) bg, batch_node_feats = bg.to(device), batch_node_feats.to(device)
# Test default setting # Test default setting
gcn_predictor = GCNPredictor(in_feats=1) gcn_predictor = GCNPredictor(in_feats=1).to(device)
gcn_predictor.eval() gcn_predictor.eval()
assert gcn_predictor(g, node_feats).shape == torch.Size([1, 1]) assert gcn_predictor(g, node_feats).shape == torch.Size([1, 1])
gcn_predictor.train() gcn_predictor.train()
...@@ -213,7 +214,7 @@ def test_mpnn_predictor(): ...@@ -213,7 +214,7 @@ def test_mpnn_predictor():
# Test default setting # Test default setting
mpnn_predictor = MPNNPredictor(node_in_feats=1, mpnn_predictor = MPNNPredictor(node_in_feats=1,
edge_in_feats=2) edge_in_feats=2).to(device)
assert mpnn_predictor(g, node_feats, edge_feats).shape == torch.Size([1, 1]) assert mpnn_predictor(g, node_feats, edge_feats).shape == torch.Size([1, 1])
assert mpnn_predictor(bg, batch_node_feats, batch_edge_feats).shape == \ assert mpnn_predictor(bg, batch_node_feats, batch_edge_feats).shape == \
torch.Size([2, 1]) torch.Size([2, 1])
...@@ -226,7 +227,7 @@ def test_mpnn_predictor(): ...@@ -226,7 +227,7 @@ def test_mpnn_predictor():
n_tasks=2, n_tasks=2,
num_step_message_passing=2, num_step_message_passing=2,
num_step_set2set=2, num_step_set2set=2,
num_layer_set2set=2) num_layer_set2set=2).to(device)
assert mpnn_predictor(g, node_feats, edge_feats).shape == torch.Size([1, 2]) assert mpnn_predictor(g, node_feats, edge_feats).shape == torch.Size([1, 2])
assert mpnn_predictor(bg, batch_node_feats, batch_edge_feats).shape == \ assert mpnn_predictor(bg, batch_node_feats, batch_edge_feats).shape == \
torch.Size([2, 2]) torch.Size([2, 2])
......
...@@ -65,10 +65,10 @@ def test_wln_reaction_center(): ...@@ -65,10 +65,10 @@ def test_wln_reaction_center():
model = WLNReactionCenter(node_in_feats=1, model = WLNReactionCenter(node_in_feats=1,
edge_in_feats=2, edge_in_feats=2,
node_pair_in_feats=1).to(device) node_pair_in_feats=1).to(device)
assert model(mol_graph, complete_graph, node_feats, edge_feats, atom_pair_feats).shape == \ assert model(mol_graph, complete_graph, node_feats, edge_feats, atom_pair_feats)[0].shape == \
torch.Size([complete_graph.number_of_edges(), 5]) torch.Size([complete_graph.number_of_edges(), 5])
assert model(batch_mol_graph, batch_complete_graph, batch_node_feats, assert model(batch_mol_graph, batch_complete_graph, batch_node_feats,
batch_edge_feats, batch_atom_pair_feats).shape == \ batch_edge_feats, batch_atom_pair_feats)[0].shape == \
torch.Size([batch_complete_graph.number_of_edges(), 5]) torch.Size([batch_complete_graph.number_of_edges(), 5])
# Test configured setting # Test configured setting
...@@ -78,10 +78,10 @@ def test_wln_reaction_center(): ...@@ -78,10 +78,10 @@ def test_wln_reaction_center():
node_out_feats=1, node_out_feats=1,
n_layers=1, n_layers=1,
n_tasks=1).to(device) n_tasks=1).to(device)
assert model(mol_graph, complete_graph, node_feats, edge_feats, atom_pair_feats).shape == \ assert model(mol_graph, complete_graph, node_feats, edge_feats, atom_pair_feats)[0].shape == \
torch.Size([complete_graph.number_of_edges(), 1]) torch.Size([complete_graph.number_of_edges(), 1])
assert model(batch_mol_graph, batch_complete_graph, batch_node_feats, assert model(batch_mol_graph, batch_complete_graph, batch_node_feats,
batch_edge_feats, batch_atom_pair_feats).shape == \ batch_edge_feats, batch_atom_pair_feats)[0].shape == \
torch.Size([batch_complete_graph.number_of_edges(), 1]) torch.Size([batch_complete_graph.number_of_edges(), 1])
if __name__ == '__main__': if __name__ == '__main__':
......
#!/bin/bash
# Install the DGL dependency for CI, matching the requested device.
# Argument
#   - dev: cpu or gpu
if [ $# -ne 1 ]; then
    echo "Device argument required, can be cpu or gpu"
    # Standard non-zero status; `exit -1` is not portable.
    exit 1
fi

dev=$1

set -e
. /opt/conda/etc/profile.d/conda.sh

rm -rf _deps
mkdir _deps
pushd _deps

conda activate "pytorch-ci"
# BUG FIX: the branches were inverted — the gpu path installed the CPU
# wheel (dgl) and the cpu path installed the CUDA wheel (dgl-cu101).
if [ "$dev" == "gpu" ]; then
    pip uninstall -y dgl-cu101
    pip install --pre dgl-cu101
    python3 setup.py install
else
    pip uninstall -y dgl
    pip install --pre dgl
    python3 setup.py install
fi
popd
\ No newline at end of file
#!/bin/bash
# Adapted from github.com/dmlc/dgl/tests/lint/task_lint.sh
# Lint the dgllife python package with pylint; fail the build on any error.
echo 'Checking code style of python codes...'
if ! python3 -m pylint --reports=y -v --rcfile=tests/lint/pylintrc python/dgllife; then
    exit 1
fi
\ No newline at end of file
#!/bin/bash
# Run the DGL-LifeSci unit tests.
# Usage: $0 backend device
#   backend: DGL backend name (e.g. pytorch)
#   device:  cpu or gpu
. /opt/conda/etc/profile.d/conda.sh

function fail {
    echo FAIL: $@
    # Standard non-zero status; `exit -1` is not portable.
    exit 1
}

function usage {
    echo "Usage: $0 backend device"
}

if [ $# -ne 2 ]; then
    usage
    fail "Error: must specify backend and device"
fi

export DGLBACKEND=$1
export PYTHONPATH=${PWD}/python:$PYTHONPATH
export DGL_DOWNLOAD_DIR=${PWD}

# Quote $2 so an empty argument cannot break the test expression.
if [ "$2" == "gpu" ]
then
    export CUDA_VISIBLE_DEVICES=0
else
    # -1 hides all GPUs from CUDA, forcing CPU execution.
    export CUDA_VISIBLE_DEVICES=-1
fi

conda activate ${DGLBACKEND}-ci

pip install _deps/dgl*.whl

python3 -m pytest -v --junitxml=pytest_data.xml tests/data || fail "data"
python3 -m pytest -v --junitxml=pytest_model.xml tests/model || fail "model"
python3 -m pytest -v --junitxml=pytest_utils.xml tests/utils || fail "utils"
\ No newline at end of file
...@@ -37,8 +37,8 @@ def test_acnn_graph_construction_and_featurization(): ...@@ -37,8 +37,8 @@ def test_acnn_graph_construction_and_featurization():
pocket_mol, pocket_mol,
ligand_coords, ligand_coords,
pocket_coords) pocket_coords)
assert g.ntypes == ['protein_atom', 'ligand_atom'] assert set(g.ntypes) == set(['protein_atom', 'ligand_atom'])
assert g.etypes == ['protein', 'ligand', 'complex', 'complex', 'complex', 'complex'] assert set(g.etypes) == set(['protein', 'ligand', 'complex', 'complex', 'complex', 'complex'])
assert g.number_of_nodes('protein_atom') == 286 assert g.number_of_nodes('protein_atom') == 286
assert g.number_of_nodes('ligand_atom') == 21 assert g.number_of_nodes('ligand_atom') == 21
......
...@@ -61,22 +61,14 @@ def test_Meter(): ...@@ -61,22 +61,14 @@ def test_Meter():
meter = Meter(label_mean, label_std) meter = Meter(label_mean, label_std)
meter.update(label, pred) meter.update(label, pred)
true_scores = [0.22125875529784111, 0.5937311018897714] true_scores = [0.22125875529784111, 0.5937311018897714]
assert meter.rmse() == true_scores assert torch.allclose(torch.tensor(meter.rmse()), torch.tensor(true_scores))
assert meter.rmse('mean') == np.mean(true_scores) assert torch.allclose(torch.tensor(meter.compute_metric('rmse')), torch.tensor(true_scores))
assert meter.rmse('sum') == np.sum(true_scores)
assert meter.compute_metric('rmse') == true_scores
assert meter.compute_metric('rmse', 'mean') == np.mean(true_scores)
assert meter.compute_metric('rmse', 'sum') == np.sum(true_scores)
meter = Meter(label_mean, label_std) meter = Meter(label_mean, label_std)
meter.update(label, pred, mask) meter.update(label, pred, mask)
true_scores = [0.1337071188699867, 0.5019903799993205] true_scores = [0.1337071188699867, 0.5019903799993205]
assert meter.rmse() == true_scores assert torch.allclose(torch.tensor(meter.rmse()), torch.tensor(true_scores))
assert meter.rmse('mean') == np.mean(true_scores) assert torch.allclose(torch.tensor(meter.compute_metric('rmse')), torch.tensor(true_scores))
assert meter.rmse('sum') == np.sum(true_scores)
assert meter.compute_metric('rmse') == true_scores
assert meter.compute_metric('rmse', 'mean') == np.mean(true_scores)
assert meter.compute_metric('rmse', 'sum') == np.sum(true_scores)
# roc auc score # roc auc score
meter = Meter() meter = Meter()
......
...@@ -3,19 +3,19 @@ import os ...@@ -3,19 +3,19 @@ import os
import shutil import shutil
from dgl.data.utils import download, _get_dgl_url, extract_archive from dgl.data.utils import download, _get_dgl_url, extract_archive
from dgllife.utils.rdkit_utils import get_mol_3D_coordinates, load_molecule from dgllife.utils.rdkit_utils import get_mol_3d_coordinates, load_molecule
from rdkit import Chem from rdkit import Chem
from rdkit.Chem import AllChem from rdkit.Chem import AllChem
def test_get_mol_3D_coordinates(): def test_get_mol_3D_coordinates():
mol = Chem.MolFromSmiles('CCO') mol = Chem.MolFromSmiles('CCO')
# Test the case when conformation does not exist # Test the case when conformation does not exist
assert get_mol_3D_coordinates(mol) is None assert get_mol_3d_coordinates(mol) is None
# Test the case when conformation exists # Test the case when conformation exists
AllChem.EmbedMolecule(mol) AllChem.EmbedMolecule(mol)
AllChem.MMFFOptimizeMolecule(mol) AllChem.MMFFOptimizeMolecule(mol)
coords = get_mol_3D_coordinates(mol) coords = get_mol_3d_coordinates(mol)
assert isinstance(coords, np.ndarray) assert isinstance(coords, np.ndarray)
assert coords.shape == (mol.GetNumAtoms(), 3) assert coords.shape == (mol.GetNumAtoms(), 3)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment