nnp_training.py
import sys
import torch
import ignite
import torchani
import model
import tqdm
import timeit
import tensorboardX
import math

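# Use the GPU when available, otherwise fall back to the CPU.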
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

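# Dataset chunking/batching parameters, checkpoint locations, and epoch count;
# the path to the raw dataset is taken from the command line.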
chunk_size = 256
batch_chunks = 4
dataset_path = sys.argv[1]
dataset_checkpoint = 'dataset-checkpoint.dat'
model_checkpoint = 'checkpoint.pt'
max_epochs = 10

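# TensorBoard log writer and the wall-clock start time used by the timing handler.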
writer = tensorboardX.SummaryWriter()
start = timeit.default_timer()

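# Load the dataset from the checkpoint if it exists, otherwise create it from the
# raw file at dataset_path; energies are shifted by subtracting self atomic
# energies (SAE) so the network only has to learn the remainder.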
shift_energy = torchani.EnergyShifter()
training, validation, testing = torchani.data.load_or_create(
    dataset_checkpoint, dataset_path, chunk_size, device=device,
    transform=[shift_energy.dataset_subtract_sae])
training = torchani.data.dataloader(training, batch_chunks)
validation = torchani.data.dataloader(validation, batch_chunks)
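# Build the NNP via the local `model` module, reusing the weights in
# model_checkpoint when that file already exists.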
nnp = model.get_or_create_model(model_checkpoint, device=device)
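# Wrap the network for batched inputs and register it with Ignite through a
# container keyed by the 'energies' output.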
batch_nnp = torchani.models.BatchModel(nnp)
container = torchani.ignite.Container({'energies': batch_nnp})
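# Adam optimizer over the underlying network's parameters, using default hyperparameters.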
optimizer = torch.optim.Adam(nnp.parameters())

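# Ignite engines: the trainer minimizes the MSE of the predicted energies,
# and the evaluator reports the RMSE metric used for validation.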
trainer = ignite.engine.create_supervised_trainer(
    container, optimizer, torchani.ignite.energy_mse_loss)
evaluator = ignite.engine.create_supervised_evaluator(container, metrics={
        'RMSE': torchani.ignite.energy_rmse_metric
    })


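# Convert an energy from Hartree to kcal/mol.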
def hartree2kcal(x):
    return 627.509 * x


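# Progress-bar handlers: open a bar when an epoch starts, advance it after
# every iteration, and close it when the epoch completes.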
@trainer.on(ignite.engine.Events.EPOCH_STARTED)
def init_tqdm(trainer):
    trainer.state.tqdm = tqdm.tqdm(total=len(training), desc='epoch')


@trainer.on(ignite.engine.Events.ITERATION_COMPLETED)
def update_tqdm(trainer):
    trainer.state.tqdm.update(1)


@trainer.on(ignite.engine.Events.EPOCH_COMPLETED)
def finalize_tqdm(trainer):
    trainer.state.tqdm.close()


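# At the start of each epoch, evaluate on the validation set and log the
# RMSE (converted to kcal/mol) to TensorBoard.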
@trainer.on(ignite.engine.Events.EPOCH_STARTED)
def log_validation_results(trainer):
    evaluator.run(validation)
    metrics = evaluator.state.metrics
    rmse = hartree2kcal(metrics['RMSE'])
    writer.add_scalar('validation_rmse_vs_epoch', rmse, trainer.state.epoch)


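# Log the elapsed wall-clock time at the start of each epoch.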
@trainer.on(ignite.engine.Events.EPOCH_STARTED)
def log_time(trainer):
    elapsed = round(timeit.default_timer() - start, 2)
    writer.add_scalar('time_vs_epoch', elapsed, trainer.state.epoch)


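# After each training iteration, convert the batch MSE loss to an RMSE in
# kcal/mol and log it against the iteration number.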
@trainer.on(ignite.engine.Events.ITERATION_COMPLETED)
def log_loss_and_time(trainer):
    iteration = trainer.state.iteration
    rmse = hartree2kcal(math.sqrt(trainer.state.output))
    writer.add_scalar('training_rmse_vs_iteration', rmse, iteration)


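# Run the training loop.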
trainer.run(training, max_epochs=max_epochs)