Unverified Commit 1be78103 authored by zcxzcx1's avatar zcxzcx1 Committed by GitHub
Browse files

Add files via upload

parent f675ef76
MIT License
Copyright (c) 2022 ACEsuit/mace
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
This diff is collapsed.
This diff is collapsed.
LICENSE.md
MANIFEST.in
README.md
pyproject.toml
setup.cfg
mace/__init__.py
mace/__version__.py
mace/py.typed
mace/calculators/__init__.py
mace/calculators/foundations_models.py
mace/calculators/lammps_mace.py
mace/calculators/lammps_mliap_mace.py
mace/calculators/mace.py
mace/cli/__init__.py
mace/cli/active_learning_md.py
mace/cli/convert_cueq_e3nn.py
mace/cli/convert_device.py
mace/cli/convert_e3nn_cueq.py
mace/cli/create_lammps_model.py
mace/cli/eval_configs.py
mace/cli/fine_tuning_select.py
mace/cli/plot_train.py
mace/cli/preprocess_data.py
mace/cli/run_train.py
mace/cli/select_head.py
mace/cli/visualise_train.py
mace/data/__init__.py
mace/data/atomic_data.py
mace/data/hdf5_dataset.py
mace/data/lmdb_dataset.py
mace/data/neighborhood.py
mace/data/utils.py
mace/modules/__init__.py
mace/modules/blocks.py
mace/modules/irreps_tools.py
mace/modules/loss.py
mace/modules/models.py
mace/modules/radial.py
mace/modules/symmetric_contraction.py
mace/modules/utils.py
mace/modules/wrapper_ops.py
mace/tools/__init__.py
mace/tools/arg_parser.py
mace/tools/arg_parser_tools.py
mace/tools/cg.py
mace/tools/checkpoint.py
mace/tools/compile.py
mace/tools/default_keys.py
mace/tools/finetuning_utils.py
mace/tools/model_script_utils.py
mace/tools/multihead_tools.py
mace/tools/run_train_utils.py
mace/tools/scatter.py
mace/tools/scripts_utils.py
mace/tools/slurm_distributed.py
mace/tools/tables_utils.py
mace/tools/torch_tools.py
mace/tools/train.py
mace/tools/utils.py
mace/tools/fairchem_dataset/__init__.py
mace/tools/fairchem_dataset/lmdb_dataset_tools.py
mace/tools/torch_geometric/__init__.py
mace/tools/torch_geometric/batch.py
mace/tools/torch_geometric/data.py
mace/tools/torch_geometric/dataloader.py
mace/tools/torch_geometric/dataset.py
mace/tools/torch_geometric/seed.py
mace/tools/torch_geometric/utils.py
mace_torch.egg-info/PKG-INFO
mace_torch.egg-info/SOURCES.txt
mace_torch.egg-info/dependency_links.txt
mace_torch.egg-info/entry_points.txt
mace_torch.egg-info/requires.txt
mace_torch.egg-info/top_level.txt
scripts/__init__.py
scripts/eval_configs.py
scripts/preprocess_data.py
scripts/run_train.py
tests/__init__.py
tests/test_benchmark.py
tests/test_calculator.py
tests/test_cg.py
tests/test_compile.py
tests/test_cueq.py
tests/test_data.py
tests/test_finetuning_select.py
tests/test_foundations.py
tests/test_hessian.py
tests/test_lmdb_database.py
tests/test_models.py
tests/test_modules.py
tests/test_multifiles.py
tests/test_preprocess.py
tests/test_run_train.py
tests/test_run_train_allkeys.py
tests/test_schedulefree.py
tests/test_tools.py
\ No newline at end of file
[console_scripts]
mace_active_learning_md = mace.cli.active_learning_md:main
mace_convert_device = mace.cli.convert_device:main
mace_create_lammps_model = mace.cli.create_lammps_model:main
mace_cueq_to_e3nn = mace.cli.convert_cueq_e3nn:main
mace_e3nn_cueq = mace.cli.convert_e3nn_cueq:main
mace_eval_configs = mace.cli.eval_configs:main
mace_finetuning = mace.cli.fine_tuning_select:main
mace_plot_train = mace.cli.plot_train:main
mace_prepare_data = mace.cli.preprocess_data:main
mace_run_train = mace.cli.run_train:main
mace_select_head = mace.cli.select_head:main
torch>=1.12
e3nn==0.4.4
numpy
opt_einsum
ase
torch-ema
prettytable
matscipy
h5py
torchmetrics
python-hostlist
configargparse
GitPython
pyYAML
tqdm
lmdb
orjson
matplotlib
pandas
[cueq]
cuequivariance-torch>=0.2.0
[cueq-cuda-11]
cuequivariance-ops-torch-cu11>=0.2.0
[cueq-cuda-12]
cuequivariance-ops-torch-cu12>=0.2.0
[dev]
black
isort
mypy
pre-commit
pytest
pytest-benchmark
pylint
[fpsample]
fpsample
[schedulefree]
schedulefree
[wandb]
wandb
[build-system]
requires = [
"setuptools>=42",
"wheel",
]
build-backend = "setuptools.build_meta"
# Make isort compatible with black
[tool.isort]
profile = "black"
# Pylint
[tool.pylint.'MESSAGES CONTROL']
disable = [
"line-too-long",
"no-member",
"missing-module-docstring",
"missing-class-docstring",
"missing-function-docstring",
"too-many-arguments",
"too-many-positional-arguments",
"too-many-locals",
"too-many-return-statements",
"not-callable",
"logging-fstring-interpolation",
"logging-not-lazy",
"logging-too-many-args",
"invalid-name",
"too-few-public-methods",
"too-many-instance-attributes",
"too-many-statements",
"too-many-branches",
"import-outside-toplevel",
"cell-var-from-loop",
"duplicate-code",
"use-dict-literal",
]
[tool.pylint.MASTER]
ignore-paths = [
"^mace/tools/torch_geometric/.*$",
"^mace/tools/scatter.py$",
]
[tool.pylint.FORMAT]
max-module-lines = 1500
[tool.ruff]
target-version = "py38"
[tool.ruff.lint]
select = ["FA102"]
#!/bin/bash
# SLURM batch script: distributed MACE training on 2 nodes x 10 GPUs
# (20 ranks total, one task per GPU, 8 CPU cores per task).
#SBATCH --partition=gpu
#SBATCH --job-name=train
#SBATCH --output=train.out
#SBATCH --nodes=2
#SBATCH --ntasks=20
#SBATCH --ntasks-per-node=10
#SBATCH --gpus-per-node=10
#SBATCH --cpus-per-task=8
#SBATCH --exclusive
#SBATCH --time=1:00:00

# NOTE(review): the path 'mace/scripts/run_train.py' does not match the
# package layout visible in this repo (mace/cli/run_train.py, or the
# installed 'mace_run_train' entry point) -- confirm before submitting.
# Fix: the original ended the last argument with a dangling '\' at EOF,
# which is a broken line continuation; the final line must not end in '\'.
srun python mace/scripts/run_train.py \
    --name='model' \
    --model='MACE' \
    --num_interactions=2 \
    --num_channels=128 \
    --max_L=2 \
    --correlation=3 \
    --E0s='average' \
    --r_max=5.0 \
    --train_file='./h5_data/train.h5' \
    --valid_file='./h5_data/valid.h5' \
    --statistics_file='./h5_data/statistics.json' \
    --num_workers=8 \
    --batch_size=20 \
    --valid_batch_size=80 \
    --max_num_epochs=100 \
    --loss='weighted' \
    --error_table='PerAtomRMSE' \
    --default_dtype='float32' \
    --device='cuda' \
    --distributed \
    --seed=2222
\ No newline at end of file
"""Thin command-line shim that re-exports :func:`mace.cli.eval_configs.main`."""
from mace.cli.eval_configs import main

if __name__ == "__main__":
    main()
## Wrapper for mace.cli.preprocess_data.main ##
from mace.cli.preprocess_data import main
if __name__ == "__main__":
    main()
# Format the codebase in place (black, then isort with its black-compatible profile).
python -m black .
python -m isort .
# Check: static analysis using the pylint settings stored in pyproject.toml.
python -m pylint --rcfile=pyproject.toml mace tests scripts
# Tests: run the full pytest suite.
python -m pytest tests
"""Thin command-line shim that re-exports :func:`mace.cli.run_train.main`."""
from mace.cli.run_train import main

if __name__ == "__main__":
    main()
[metadata]
name = mace-torch
version = attr: mace.__version__
short_description = MACE - Fast and accurate machine learning interatomic potentials with higher order equivariant message passing.
long_description = file: README.md
long_description_content_type = text/markdown
url = https://github.com/ACEsuit/mace
classifiers =
Programming Language :: Python :: 3
Operating System :: OS Independent
License :: OSI Approved :: MIT License
[options]
packages = find:
python_requires = >=3.7
install_requires =
torch>=1.12
e3nn==0.4.4
numpy
opt_einsum
ase
torch-ema
prettytable
matscipy
h5py
torchmetrics
python-hostlist
configargparse
GitPython
pyYAML
tqdm
lmdb
orjson
# for plotting:
matplotlib
pandas
[options.entry_points]
console_scripts =
mace_active_learning_md = mace.cli.active_learning_md:main
mace_create_lammps_model = mace.cli.create_lammps_model:main
mace_eval_configs = mace.cli.eval_configs:main
mace_plot_train = mace.cli.plot_train:main
mace_run_train = mace.cli.run_train:main
mace_prepare_data = mace.cli.preprocess_data:main
mace_finetuning = mace.cli.fine_tuning_select:main
mace_convert_device = mace.cli.convert_device:main
mace_select_head = mace.cli.select_head:main
mace_e3nn_cueq = mace.cli.convert_e3nn_cueq:main
mace_cueq_to_e3nn = mace.cli.convert_cueq_e3nn:main
[options.extras_require]
wandb = wandb
fpsample = fpsample
dev =
black
isort
mypy
pre-commit
pytest
pytest-benchmark
pylint
schedulefree = schedulefree
cueq = cuequivariance-torch>=0.2.0
cueq-cuda-11 = cuequivariance-ops-torch-cu11>=0.2.0
cueq-cuda-12 = cuequivariance-ops-torch-cu12>=0.2.0
import os
os.environ["TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD"] = "1"
import pytest
import torch
from mace.modules.radial import AgnesiTransform, ZBLBasis
@pytest.fixture
def zbl_basis():
    """Provide a non-trainable ZBL repulsion basis for the tests below."""
    basis = ZBLBasis(p=6, trainable=False)
    return basis
def test_zbl_basis_initialization(zbl_basis):
    """A non-trainable ZBLBasis exposes the expected constants with gradients off."""
    expected_c = torch.tensor([0.1818, 0.5099, 0.2802, 0.02817])
    assert zbl_basis.p == torch.tensor(6.0)
    assert torch.allclose(zbl_basis.c, expected_c)
    # a_exp / a_prefactor hold fixed reference values and must not require grad.
    for attr, value in (("a_exp", 0.300), ("a_prefactor", 0.4543)):
        param = getattr(zbl_basis, attr)
        assert param == torch.tensor(value)
        assert not param.requires_grad
def test_trainable_zbl_basis_initialization():
    """A trainable ZBLBasis keeps the same constants but with gradients enabled.

    Fix: the original requested the non-trainable ``zbl_basis`` fixture and
    immediately shadowed it with a fresh instance, so the fixture was built
    and discarded; the unused parameter is removed.
    """
    zbl_basis = ZBLBasis(p=6, trainable=True)
    assert zbl_basis.p == torch.tensor(6.0)
    assert torch.allclose(zbl_basis.c, torch.tensor([0.1818, 0.5099, 0.2802, 0.02817]))
    assert zbl_basis.a_exp == torch.tensor(0.300)
    assert zbl_basis.a_prefactor == torch.tensor(0.4543)
    # trainable=True should register a_exp / a_prefactor as learnable parameters.
    assert zbl_basis.a_exp.requires_grad
    assert zbl_basis.a_prefactor.requires_grad
def test_forward(zbl_basis):
    """ZBLBasis.forward maps edge distances to one repulsion energy per node."""
    distances = torch.tensor([1.0, 1.0, 2.0]).unsqueeze(-1)  # [n_edges, 1]
    one_hot = torch.tensor([[1, 0], [0, 1]])  # [n_nodes, n_species] one-hot species
    edges = torch.tensor([[0, 1, 1], [1, 0, 1]])  # [2, n_edges] sender/receiver pairs
    zs = torch.tensor([1, 6])  # [n_nodes] atomic numbers (H, C)
    output = zbl_basis(distances, one_hot, edges, zs)
    assert torch.is_tensor(output)
    assert output.shape == torch.Size([one_hot.shape[0]])
    expected = torch.tensor([0.0031, 0.0031], dtype=torch.get_default_dtype())
    assert torch.allclose(output, expected, rtol=1e-2)
@pytest.fixture
def agnesi():
    """Provide a non-trainable Agnesi distance transform for the tests below."""
    transform = AgnesiTransform(trainable=False)
    return transform
def test_agnesi_transform_initialization(agnesi: AgnesiTransform):
    """A non-trainable AgnesiTransform has the expected constants, gradients off."""
    for attr, value in (("q", 0.9183), ("p", 4.5791), ("a", 1.0805)):
        param = getattr(agnesi, attr)
        assert param.item() == pytest.approx(value, rel=1e-4)
        assert not param.requires_grad
def test_trainable_agnesi_transform_initialization():
    """A trainable AgnesiTransform keeps the same constants but requires grad."""
    transform = AgnesiTransform(trainable=True)
    for attr, value in (("q", 0.9183), ("p", 4.5791), ("a", 1.0805)):
        param = getattr(transform, attr)
        assert param.item() == pytest.approx(value, rel=1e-4)
        assert param.requires_grad
def test_agnesi_transform_forward():
    """Forward pass preserves the [n_edges, 1] input shape and matches references."""
    transform = AgnesiTransform()
    dtype = torch.get_default_dtype()
    distances = torch.tensor([1.0, 2.0, 3.0], dtype=dtype).unsqueeze(-1)
    one_hot = torch.tensor([[0, 1], [1, 0], [0, 1]], dtype=dtype)
    edges = torch.tensor([[0, 1, 2], [1, 2, 0]])  # [2, n_edges]
    zs = torch.tensor([1, 6, 8])  # atomic numbers (H, C, O)
    output = transform(distances, one_hot, edges, zs)
    assert torch.is_tensor(output)
    assert output.shape == distances.shape
    expected = torch.tensor([0.3646, 0.2175, 0.2089], dtype=dtype).unsqueeze(-1)
    assert torch.allclose(output, expected, rtol=1e-2)
# Allow running this test module directly (python <file>) without invoking pytest.
if __name__ == "__main__":
    pytest.main([__file__])
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment