# ANI-1x-training-Adam.py — train an ANI model on the ANI-1x dataset with Adam.
import torch
import torchani
import torchani.data
import math
import timeit
import sys
import pickle
from tensorboardX import SummaryWriter
from tqdm import tqdm
from common import get_or_create_model, Averager, evaluate
import json

chunk_size = 256
batch_chunks = 1024 // chunk_size

with open('data/dataset.dat', 'rb') as f:
    training, validation, testing = pickle.load(f)

    training_sampler = torchani.data.BatchSampler(
        training, chunk_size, batch_chunks)
    validation_sampler = torchani.data.BatchSampler(
        validation, chunk_size, batch_chunks)
    testing_sampler = torchani.data.BatchSampler(
        testing, chunk_size, batch_chunks)

    training_dataloader = torch.utils.data.DataLoader(
27
28
        training, batch_sampler=training_sampler,
        collate_fn=torchani.data.collate)
Xiang Gao's avatar
Xiang Gao committed
29
    validation_dataloader = torch.utils.data.DataLoader(
30
31
        validation, batch_sampler=validation_sampler,
        collate_fn=torchani.data.collate)
Xiang Gao's avatar
Xiang Gao committed
32
    testing_dataloader = torch.utils.data.DataLoader(
33
34
        testing, batch_sampler=testing_sampler,
        collate_fn=torchani.data.collate)

writer = SummaryWriter('runs/adam-{}'.format(sys.argv[1]))

checkpoint = 'checkpoint.pt'
model = get_or_create_model(checkpoint)
optimizer = torch.optim.Adam(model.parameters(), **json.loads(sys.argv[1]))
step = 0
epoch = 0


def subset_rmse(subset_dataloader):
    a = Averager()
    for batch in subset_dataloader:
        for molecule_id in batch:
            _species = subset_dataloader.dataset.species[molecule_id]
            coordinates, energies = batch[molecule_id]
51
52
            coordinates = coordinates.to(model.aev_computer.device)
            energies = energies.to(model.aev_computer.device)
53
54
            count, squared_error = evaluate(
                model, coordinates, energies, _species)
Xiang Gao's avatar
Xiang Gao committed
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
            squared_error = squared_error.item()
            a.add(count, squared_error)
    mse = a.avg()
    rmse = math.sqrt(mse) * 627.509
    return rmse


def optimize_step(a):
    mse = a.avg()
    rmse = math.sqrt(mse.item()) * 627.509
    writer.add_scalar('training_rmse_vs_step', rmse, step)
    loss = mse if epoch < 10 else 0.5 * torch.exp(2 * mse)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()


best_validation_rmse = math.inf
best_epoch = 0
start = timeit.default_timer()
while True:
76
77
78
    for batch in tqdm(training_dataloader,
                      desc='epoch {}'.format(epoch),
                      total=len(training_sampler)):
Xiang Gao's avatar
Xiang Gao committed
79
80
81
82
        a = Averager()
        for molecule_id in batch:
            _species = training.species[molecule_id]
            coordinates, energies = batch[molecule_id]
83
84
            coordinates = coordinates.to(model.aev_computer.device)
            energies = energies.to(model.aev_computer.device)
85
86
            count, squared_error = evaluate(
                model, coordinates, energies, _species)
Xiang Gao's avatar
Xiang Gao committed
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
            a.add(count, squared_error / len(_species))
        optimize_step(a)
        step += 1

    validation_rmse = subset_rmse(validation_dataloader)
    elapsed = round(timeit.default_timer() - start, 2)
    print('Epoch:', epoch, 'time:', elapsed,
          'validation rmse:', validation_rmse)
    writer.add_scalar('validation_rmse_vs_epoch', validation_rmse, epoch)
    writer.add_scalar('epoch_vs_step', epoch, step)
    writer.add_scalar('time_vs_epoch', elapsed, epoch)

    if validation_rmse < best_validation_rmse:
        best_validation_rmse = validation_rmse
        best_epoch = epoch
        writer.add_scalar('best_validation_rmse_vs_epoch',
                          best_validation_rmse, best_epoch)
    elif epoch - best_epoch > 1000:
        print('Stop at best validation rmse:', best_validation_rmse)
        break

    epoch += 1

testing_rmse = subset_rmse(testing_dataloader)
print('Test rmse:', validation_rmse)