import sys

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm

from dgl.data.gindt import GINDataset

# local modules from this example (note: parser.py shadows the stdlib parser)
from dataloader import GraphDataLoader, collate
from parser import Parser
from gin import GIN


def train(args, net, trainloader, optimizer, criterion, epoch):
    net.train()

    running_loss = 0
    total_iters = len(trainloader)
    # set up the progress bar offset to avoid overlap with the mouse cursor
    bar = tqdm(range(total_iters), unit='batch', position=2, file=sys.stdout)

    for _, (graphs, labels) in zip(bar, trainloader):
        # batched graphs are shipped to the device in the model's forward pass
        labels = labels.to(args.device)
        outputs = net(graphs)

        loss = criterion(outputs, labels)
        running_loss += loss.item()

        # backprop
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # report
        bar.set_description('epoch-{}'.format(epoch))
    bar.close()

    # average the loss over all batches
    running_loss = running_loss / total_iters

    return running_loss


def eval_net(args, net, dataloader, criterion):
    net.eval()

    total = 0
    total_loss = 0
    total_correct = 0

    # no gradients are needed for evaluation
    with torch.no_grad():
        for graphs, labels in dataloader:
            labels = labels.to(args.device)
            total += len(labels)

            outputs = net(graphs)
            _, predicted = torch.max(outputs, 1)
            total_correct += (predicted == labels).sum().item()

            # the criterion averages over the batch by default
            # (reduction='mean'), so re-weight by the batch size
            loss = criterion(outputs, labels)
            total_loss += loss.item() * len(labels)

    loss, acc = total_loss / total, total_correct / total

    net.train()

    return loss, acc


def main(args):
    # set up seeds; args.seed is supported
    torch.manual_seed(seed=args.seed)
    np.random.seed(seed=args.seed)

    is_cuda = not args.disable_cuda and torch.cuda.is_available()

    if is_cuda:
        args.device = torch.device("cuda:" + str(args.device))
        torch.cuda.manual_seed_all(seed=args.seed)
    else:
        args.device = torch.device("cpu")

    # add self-loops when eps is not learned
    dataset = GINDataset(args.dataset, not args.learn_eps)

    trainloader, validloader = GraphDataLoader(
        dataset, batch_size=args.batch_size, device=args.device,
        collate_fn=collate, seed=args.seed, shuffle=True,
        split_name='fold10', fold_idx=args.fold_idx).train_valid_loader()
    # or split_name='rand', split_ratio=0.7

    model = GIN(
        args.num_layers, args.num_mlp_layers,
        dataset.dim_nfeats, args.hidden_dim, dataset.gclasses,
        args.final_dropout, args.learn_eps,
        args.graph_pooling_type, args.neighbor_pooling_type,
        args.device).to(args.device)

    criterion = nn.CrossEntropyLoss()  # default reduction is 'mean'
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)

    # it's not cost-effective to handle the cursor and initialize at 0
    # https://stackoverflow.com/a/23121189
    tbar = tqdm(range(args.epochs), unit="epoch", position=3, ncols=0,
                file=sys.stdout)
    vbar = tqdm(range(args.epochs), unit="epoch", position=4, ncols=0,
                file=sys.stdout)
    lrbar = tqdm(range(args.epochs), unit="epoch", position=5, ncols=0,
                 file=sys.stdout)

    for epoch, _, _ in zip(tbar, vbar, lrbar):
        train(args, model, trainloader, optimizer, criterion, epoch)
        # step the scheduler after the optimizer steps of this epoch
        # (required ordering since PyTorch 1.1)
        scheduler.step()

        train_loss, train_acc = eval_net(
            args, model, trainloader, criterion)
        tbar.set_description(
            'train set - average loss: {:.4f}, accuracy: {:.0f}%'
            .format(train_loss, 100. * train_acc))

        valid_loss, valid_acc = eval_net(
            args, model, validloader, criterion)
        vbar.set_description(
            'valid set - average loss: {:.4f}, accuracy: {:.0f}%'
            .format(valid_loss, 100. * valid_acc))

        if args.filename != "":
            with open(args.filename, 'a') as f:
                f.write('%s %s %s %s' % (
                    args.dataset,
                    args.learn_eps,
                    args.neighbor_pooling_type,
                    args.graph_pooling_type
                ))
                f.write("\n")
                f.write("%f %f %f %f" % (
                    train_loss,
                    train_acc,
                    valid_loss,
                    valid_acc
                ))
                f.write("\n")

        lrbar.set_description(
            "learned eps with learn_eps={}: {}".format(
                args.learn_eps, model.eps.data))

    tbar.close()
    vbar.close()
    lrbar.close()


if __name__ == '__main__':
    args = Parser(description='GIN').args
    print('showing all configured arguments...')
    print(args)

    main(args)
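
# A minimal usage sketch, not part of the original script. It assumes the
# local parser.py maps each command-line flag to the matching args.<name>
# attribute used above (standard argparse behavior); the dataset name and
# hyperparameter values below are illustrative only, not defaults taken
# from the repo:
#
#   python main.py --dataset MUTAG --batch_size 32 --epochs 350 \
#       --lr 0.01 --num_layers 5 --num_mlp_layers 2 --hidden_dim 64 \
#       --final_dropout 0.5 --graph_pooling_type sum \
#       --neighbor_pooling_type sum --fold_idx 0 --filename results.txt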