Unverified Commit 9a03bc62 authored by Ignacio Pickering, committed by GitHub

Fix gradcheck test (#493)

* Remove test gradient from aev tests

* Add a dedicated test gradient for one model, and make it feasible

* add test_grad to unittests

* Add some comments and fix formatting

* flake8

* flake8

* remove unused var
parent 5e6f6400
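
Background on the change: torch.autograd.gradcheck compares a function's analytical gradient against a finite-difference estimate, so it needs double precision and can only differentiate tensor inputs. Below is a minimal sketch of the pattern the new test uses; the methane-like geometry and the species indices (assumed H=0, C=1 in the model's internal element order) are illustrative assumptions, not data from this commit.

import torch
import torchani

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Double precision is required for the finite-difference comparison to be meaningful.
model = torchani.models.ANI1x(model_index=0).to(device=device, dtype=torch.double)

# Hypothetical methane-like molecule; species uses the model's internal
# element order (assumed here: H=0, C=1).
species = torch.tensor([[1, 0, 0, 0, 0]], device=device)
coordinates = torch.tensor([[[0.03, 0.00, 0.00],
                             [1.09, 0.00, 0.00],
                             [-0.36, 1.03, 0.00],
                             [-0.36, -0.51, 0.89],
                             [-0.36, -0.51, -0.89]]],
                           device=device, dtype=torch.double, requires_grad=True)

# The integer species tensor is captured by the lambda so that gradcheck only
# differentiates with respect to the coordinates; nondet_tol > 0 tolerates
# tiny run-to-run differences from nondeterministic operations.
torch.autograd.gradcheck(lambda x: model((species, x)).energies,
                         coordinates, nondet_tol=1e-13)
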
@@ -20,7 +20,8 @@ jobs:
       test_aev.py, test_aev_benzene_md.py, test_aev_nist.py, test_aev_tripeptide_md.py,
       test_utils.py, test_ase.py, test_energies.py, test_periodic_table_indexing.py,
       test_neurochem.py, test_vibrational.py, test_ensemble.py, test_padding.py,
-      test_data.py, test_forces.py, test_structure_optim.py, test_jit_builtin_models.py]
+      test_data.py, test_forces.py, test_structure_optim.py, test_jit_builtin_models.py,
+      test_grad.py]
     steps:
     - uses: actions/checkout@v1
...
@@ -3,7 +3,6 @@ import torchani
 import unittest
 import os
 import pickle
-import copy
 import itertools
 import ase
 import ase.io
@@ -132,41 +131,6 @@ class TestAEV(_TestAEVBase):
             start += conformations
             self.assertAEVEqual(expected_radial, expected_angular, aev_)
 
-    @unittest.skipIf(not torch.cuda.is_available(), "Too slow on CPU")
-    def testGradient(self):
-        """Test validity of autodiff by comparing analytical and numerical
-        gradients.
-        """
-        datafile = os.path.join(path, 'test_data/NIST/all')
-        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-        # Create a local copy of aev_computer to avoid interference with
-        # other tests.
-        aev_computer = copy.deepcopy(self.aev_computer).to(device).to(torch.float64)
-        with open(datafile, 'rb') as f:
-            data = pickle.load(f)
-        for coordinates, species, _, _, _, _ in data:
-            coordinates = torch.from_numpy(coordinates).to(device).to(torch.float64)
-            coordinates.requires_grad_(True)
-            species = torch.from_numpy(species).to(device)
-            # PyTorch gradcheck expects to test a function with inputs and
-            # outputs of type torch.Tensor. The numerical estimation of
-            # the derivative involves making small modifications to the
-            # input and observing how they affect the output. The species
-            # tensor needs to be removed from the input so that gradcheck
-            # does not attempt to estimate the gradient with respect to
-            # species and fail.
-            # Create a simple function wrapper to handle this.
-            def aev_forward_wrapper(coords):
-                # Return only the aev portion of the output.
-                return aev_computer((species, coords))[1]
-            # Sanity check: the forward wrapper returns the aev without error.
-            aev_forward_wrapper(coordinates)
-            torch.autograd.gradcheck(
-                aev_forward_wrapper,
-                coordinates
-            )
 
 class TestAEVJIT(TestAEV):
     def setUp(self):
...
import torch
import torchani
import unittest
import os
import pickle
path = os.path.dirname(os.path.realpath(__file__))


class TestGrad(unittest.TestCase):
    # torch.autograd.gradcheck and torch.autograd.gradgradcheck verify that
    # the numerical and analytical gradient and Hessian of a function match
    # to within a given tolerance.
    #
    # The forward call of the function is wrapped with a lambda so that
    # gradcheck gets a function with only one tensor input and one tensor
    # output. nondet_tol must be greater than zero because some operations
    # are nondeterministic, which can make two evaluations of the same input
    # produce slightly different outputs.
def setUp(self):
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.model = torchani.models.ANI1x(model_index=0).to(device=self.device,
dtype=torch.double)
        datafile = os.path.join(path, 'test_data/NIST/all')
        # A few small molecules are selected to keep the test fast
        with open(datafile, 'rb') as f:
            self.data = pickle.load(f)[1243:1250]
def testGradCheck(self):
for coordinates, species, _, _, _, _ in self.data:
coordinates = torch.from_numpy(coordinates).to(device=self.device,
dtype=torch.float64)
coordinates.requires_grad_(True)
species = torch.from_numpy(species).to(self.device)
torch.autograd.gradcheck(lambda x: self.model((species, x)).energies,
coordinates,
nondet_tol=1e-13)
def testGradGradCheck(self):
for coordinates, species, _, _, _, _ in self.data:
coordinates = torch.from_numpy(coordinates).to(device=self.device,
dtype=torch.float64)
coordinates.requires_grad_(True)
species = torch.from_numpy(species).to(self.device)
torch.autograd.gradgradcheck(lambda x: self.model((species, x)).energies,
coordinates,
nondet_tol=1e-13)
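
As the comments in the new test class note, gradgradcheck additionally verifies the Hessian, i.e. that the model's gradient is itself differentiable; this is the property that force-based workflows rely on. The sketch below is illustrative only and not part of this commit: it reuses the same hypothetical methane-like input as the earlier sketch (species indices assumed H=0, C=1).

import torch
import torchani

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torchani.models.ANI1x(model_index=0).to(device=device, dtype=torch.double)

# Hypothetical methane-like molecule (species in the model's internal order,
# assumed H=0, C=1).
species = torch.tensor([[1, 0, 0, 0, 0]], device=device)
coordinates = torch.tensor([[[0.03, 0.00, 0.00],
                             [1.09, 0.00, 0.00],
                             [-0.36, 1.03, 0.00],
                             [-0.36, -0.51, 0.89],
                             [-0.36, -0.51, -0.89]]],
                           device=device, dtype=torch.double, requires_grad=True)

energies = model((species, coordinates)).energies
# create_graph=True keeps the graph of the first derivative so that a second
# backward pass (the quantity gradgradcheck verifies) is possible.
forces = -torch.autograd.grad(energies.sum(), coordinates, create_graph=True)[0]
forces.pow(2).sum().backward()
# coordinates.grad now holds the gradient of the squared force norm with
# respect to the coordinates, shape (1, 5, 3).
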