"""Train a (Bi)PointNet point-cloud classifier on the ModelNet40 dataset."""
import argparse
import os
import urllib
from functools import partial

import dgl
import provider
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import tqdm
from bipointnet2 import BiPointNet2SSGCls
from bipointnet_cls import BiPointNetCls
from dgl.data.utils import download, get_download_dir
from ModelNetDataLoader import ModelNetDataLoader
from torch.utils.data import DataLoader

# NOTE(review): cuDNN is disabled globally — presumably required for the
# binarized network ops used below; confirm before re-enabling.
torch.backends.cudnn.enabled = False


parser = argparse.ArgumentParser()
# Which architecture to train: "bipointnet" or "bipointnet2_ssg".
parser.add_argument("--model", type=str, default="bipointnet")
# Path to an already-extracted ModelNet40 directory; empty = auto-download.
parser.add_argument("--dataset-path", type=str, default="")
# Optional checkpoint to resume from / save the best model to.
parser.add_argument("--load-model-path", type=str, default="")
parser.add_argument("--save-model-path", type=str, default="")
parser.add_argument("--num-epochs", type=int, default=200)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--batch-size", type=int, default=32)
args = parser.parse_args()

num_workers = args.num_workers
batch_size = args.batch_size

# Download and extract ModelNet40 (resampled, with normals) into DGL's
# download directory unless the user pointed us at a local copy.
data_filename = "modelnet40_normal_resampled.zip"
download_path = os.path.join(get_download_dir(), data_filename)
local_path = args.dataset_path or os.path.join(
    get_download_dir(), "modelnet40_normal_resampled"
)
if not os.path.exists(local_path):
    download(
        "https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip",
        download_path,
        # NOTE(review): SSL verification disabled in the original —
        # presumably the host serves a bad certificate; confirm.
        verify_ssl=False,
    )
    from zipfile import ZipFile

    with ZipFile(download_path) as z:
        z.extractall(path=get_download_dir())

# DataLoader preconfigured with the command-line worker/batch settings.
CustomDataLoader = partial(
    DataLoader,
    num_workers=num_workers,
    batch_size=batch_size,
    shuffle=True,
    drop_last=True,
)


def train(net, opt, scheduler, train_loader, dev):
    """Run one training epoch with on-the-fly point-cloud augmentation.

    Parameters
    ----------
    net : nn.Module
        Classifier mapping a point batch to class logits.
    opt : torch.optim.Optimizer
        Optimizer stepped once per batch.
    scheduler : torch.optim.lr_scheduler
        Learning-rate scheduler; stepped once at the end of the epoch.
    train_loader : iterable
        Yields ``(data, label)`` batches; ``data`` is converted to numpy
        for augmentation, so it is expected to be a CPU tensor.
    dev : torch.device
        Device the model lives on.
    """
    net.train()

    total_loss = 0
    num_batches = 0
    total_correct = 0
    count = 0
    loss_f = nn.CrossEntropyLoss()
    with tqdm.tqdm(train_loader, ascii=True) as tq:
        for data, label in tq:
            # Augment on CPU as numpy: random point dropout, then scale,
            # jitter and shift applied to the xyz channels only (channels
            # 3+ are presumably normals and are left untouched).
            data = data.data.numpy()
            data = provider.random_point_dropout(data)
            data[:, :, 0:3] = provider.random_scale_point_cloud(data[:, :, 0:3])
            data[:, :, 0:3] = provider.jitter_point_cloud(data[:, :, 0:3])
            data[:, :, 0:3] = provider.shift_point_cloud(data[:, :, 0:3])
            data = torch.tensor(data)
            # Labels arrive as (B, 1); keep only the class column.
            label = label[:, 0]

            num_examples = label.shape[0]
            data, label = data.to(dev), label.to(dev).squeeze().long()
            opt.zero_grad()
            logits = net(data)
            loss = loss_f(logits, label)
            loss.backward()
            opt.step()

            _, preds = logits.max(1)

            num_batches += 1
            count += num_examples
            loss = loss.item()
            correct = (preds == label).sum().item()
            total_loss += loss
            total_correct += correct

            tq.set_postfix(
                {
                    "AvgLoss": "%.5f" % (total_loss / num_batches),
                    "AvgAcc": "%.5f" % (total_correct / count),
                }
            )
    scheduler.step()


def evaluate(net, test_loader, dev):
    """Measure classification accuracy of *net* over *test_loader*.

    Runs in eval mode under ``torch.no_grad()``; no augmentation is applied.

    Returns
    -------
    float
        Fraction of correctly classified examples.
    """
    net.eval()

    total_correct = 0
    count = 0

    with torch.no_grad():
        with tqdm.tqdm(test_loader, ascii=True) as tq:
            for data, label in tq:
                # Labels arrive as (B, 1); keep only the class column.
                label = label[:, 0]
                num_examples = label.shape[0]
                data, label = data.to(dev), label.to(dev).squeeze().long()
                logits = net(data)
                _, preds = logits.max(1)

                correct = (preds == label).sum().item()
                total_correct += correct
                count += num_examples

                tq.set_postfix({"AvgAcc": "%.5f" % (total_correct / count)})

    return total_correct / count


dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the requested architecture: 40 ModelNet classes, 6 input dims
# (xyz + normals, matching the 6-channel data produced by the loader).
if args.model == "bipointnet":
    net = BiPointNetCls(40, input_dims=6)
elif args.model == "bipointnet2_ssg":
    net = BiPointNet2SSGCls(40, batch_size, input_dims=6)
else:
    # Fail fast instead of hitting a NameError on `net` further below.
    raise ValueError("unknown --model: %s" % args.model)

net = net.to(dev)
if args.load_model_path:
    net.load_state_dict(torch.load(args.load_model_path, map_location=dev))

opt = optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)

# Decay the learning rate by 0.7x every 20 epochs.
scheduler = optim.lr_scheduler.StepLR(opt, step_size=20, gamma=0.7)

# 1024 points per cloud; the standard ModelNet40 train/test split.
train_dataset = ModelNetDataLoader(local_path, 1024, split="train")
test_dataset = ModelNetDataLoader(local_path, 1024, split="test")
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=num_workers,
    drop_last=True,
)
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=batch_size,
    shuffle=False,
    num_workers=num_workers,
    drop_last=True,
)

best_test_acc = 0

# Train for the requested number of epochs, evaluating after every epoch
# and checkpointing whenever the test accuracy improves.
for epoch in range(args.num_epochs):
    train(net, opt, scheduler, train_loader, dev)
    print("Epoch #%d Testing" % epoch)
    test_acc = evaluate(net, test_loader, dev)
    if test_acc > best_test_acc:
        best_test_acc = test_acc
        if args.save_model_path:
            torch.save(net.state_dict(), args.save_model_path)
    print("Current test acc: %.5f (best: %.5f)" % (test_acc, best_test_acc))