import torch
import torchani
import torchani.data
import math
import timeit
import pickle
from tensorboardX import SummaryWriter
from tqdm import tqdm
from common import get_or_create_model, Averager, evaluate

# Batch geometry: each optimizer step consumes `batch_chunks` chunks of
# `chunk_size` conformations each (1024 conformations per step in total).
chunk_size = 256
batch_chunks = 1024 // chunk_size

# Load the pre-split dataset (training / validation / testing subsets).
with open('data/dataset.dat', 'rb') as f:
    training, validation, testing = pickle.load(f)

# One BatchSampler per subset; the file handle is no longer needed here.
training_sampler = torchani.data.BatchSampler(
    training, chunk_size, batch_chunks)
validation_sampler = torchani.data.BatchSampler(
    validation, chunk_size, batch_chunks)
testing_sampler = torchani.data.BatchSampler(
    testing, chunk_size, batch_chunks)

# DataLoaders driven by the samplers above, all sharing torchani's collate.
training_dataloader = torch.utils.data.DataLoader(
    training, batch_sampler=training_sampler,
    collate_fn=torchani.data.collate)
validation_dataloader = torch.utils.data.DataLoader(
    validation, batch_sampler=validation_sampler,
    collate_fn=torchani.data.collate)
testing_dataloader = torch.utils.data.DataLoader(
    testing, batch_sampler=testing_sampler,
    collate_fn=torchani.data.collate)

# TensorBoard summary writer for training/validation metrics.
writer = SummaryWriter()

# Model weights are loaded from / saved to this checkpoint path.
checkpoint = 'checkpoint.pt'
model = get_or_create_model(checkpoint)
# Adam with the AMSGrad variant; default learning rate.
optimizer = torch.optim.Adam(model.parameters(), amsgrad=True)
step = 0   # global optimizer-step counter (TensorBoard x-axis)
epoch = 0  # current epoch index


def subset_rmse(subset_dataloader):
    """Return the current model's RMSE over an entire dataset subset.

    Iterates every batch of *subset_dataloader*, accumulates the squared
    error and conformation count in an Averager, then converts the mean
    squared error to an RMSE scaled by 627.509 (presumably Hartree ->
    kcal/mol -- confirm the dataset's energy units).
    """
    a = Averager()
    for batch in subset_dataloader:
        for molecule_id in batch:
            _species = subset_dataloader.dataset.species[molecule_id]
            coordinates, energies = batch[molecule_id]
            # Move inputs to the same device the model computes on.
            coordinates = coordinates.to(model.aev_computer.device)
            energies = energies.to(model.aev_computer.device)
            # BUGFIX: evaluate() takes the model as its first argument
            # (see the training loop); it was missing here, which would
            # raise / misbehave at runtime.
            count, squared_error = evaluate(
                model, coordinates, energies, _species)
            a.add(count, squared_error.item())
    mse = a.avg()
    rmse = math.sqrt(mse) * 627.509
    return rmse


def optimize_step(a):
    """Apply one Adam update from the accumulated batch error.

    *a* is an Averager holding the batch's (per-atom) squared errors.
    Logs the batch training RMSE, then backpropagates the chosen loss.
    """
    mse = a.avg()
    rmse = 627.509 * math.sqrt(mse.item())
    writer.add_scalar('training_rmse_vs_step', rmse, step)
    # Plain MSE for the first 10 epochs, exponential loss afterwards.
    if epoch < 10:
        loss = mse
    else:
        loss = 0.5 * torch.exp(2 * mse)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()


# Train indefinitely; stop once validation RMSE has not improved for
# more than 1000 epochs past the best epoch seen so far.
best_validation_rmse = math.inf
best_epoch = 0
start = timeit.default_timer()
while True:
    progress = tqdm(training_dataloader, desc='epoch {}'.format(epoch),
                    total=len(training_sampler))
    for batch in progress:
        a = Averager()
        for molecule_id in batch:
            _species = training.species[molecule_id]
            coordinates, energies = batch[molecule_id]
            # Move inputs to the model's device before evaluation.
            coordinates = coordinates.to(model.aev_computer.device)
            energies = energies.to(model.aev_computer.device)
            count, squared_error = evaluate(
                model, coordinates, energies, _species)
            # Normalize by molecule size so each molecule contributes a
            # per-atom error to the batch average.
            a.add(count, squared_error / len(_species))
        optimize_step(a)
        step += 1

    # End-of-epoch bookkeeping: validation RMSE, wall time, TensorBoard.
    validation_rmse = subset_rmse(validation_dataloader)
    elapsed = round(timeit.default_timer() - start, 2)
    print('Epoch:', epoch, 'time:', elapsed,
          'validation rmse:', validation_rmse)
    writer.add_scalar('validation_rmse_vs_epoch', validation_rmse, epoch)
    writer.add_scalar('epoch_vs_step', epoch, step)
    writer.add_scalar('time_vs_epoch', elapsed, epoch)

    if validation_rmse < best_validation_rmse:
        # New best model: record it and checkpoint the weights.
        best_validation_rmse = validation_rmse
        best_epoch = epoch
        writer.add_scalar('best_validation_rmse_vs_epoch',
                          best_validation_rmse, best_epoch)
        torch.save(model.state_dict(), checkpoint)
    elif epoch - best_epoch > 1000:
        # Early stopping: no improvement for over 1000 epochs.
        print('Stop at best validation rmse:', best_validation_rmse)
        break

    epoch += 1

# Final evaluation on the held-out test set.
testing_rmse = subset_rmse(testing_dataloader)
# BUGFIX: report the testing RMSE -- the original printed the stale
# validation_rmse from the last training epoch.
print('Test rmse:', testing_rmse)