Unverified commit f8919197, authored by Hang Zhang, committed by GitHub
parent d4e19553
@@ -36,7 +36,7 @@ class Trainer():
         # dataset
         data_kwargs = {'transform': input_transform, 'base_size': args.base_size,
                        'crop_size': args.crop_size}
-        trainset = get_segmentation_dataset(args.dataset, split='train', mode='train',
+        trainset = get_segmentation_dataset(args.dataset, split=args.train_split, mode='train',
                                             **data_kwargs)
         testset = get_segmentation_dataset(args.dataset, split='val', mode='val',
                                            **data_kwargs)
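This hunk stops hard-coding the training split: the split now comes from args.train_split, so an experiment can train on a different split (e.g. train+val) without editing the script. A minimal sketch of the argument this assumes exists in the experiment's Options parser (the flag name and default are assumptions, not part of this diff):

    # hypothetical option declaration assumed by the hunk above
    parser.add_argument('--train-split', type=str, default='train',
                        help='dataset split to use for training (e.g. train, trainval)')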
@@ -60,16 +60,13 @@ class Trainer():
             params_list.append({'params': model.head.parameters(), 'lr': args.lr*10})
         if hasattr(model, 'auxlayer'):
             params_list.append({'params': model.auxlayer.parameters(), 'lr': args.lr*10})
-        optimizer = torch.optim.SGD(params_list,
-                                    lr=args.lr,
-                                    momentum=args.momentum,
-                                    weight_decay=args.weight_decay)
-        # clear start epoch if fine-tuning
-        if args.ft:
-            args.start_epoch = 0
+        optimizer = torch.optim.SGD(params_list, lr=args.lr,
+                                    momentum=args.momentum, weight_decay=args.weight_decay)
         # criterions
-        self.criterion = SegmentationLosses(se_loss=args.se_loss, aux=args.aux,
-                                            nclass=self.nclass)
+        self.criterion = SegmentationLosses(se_loss=args.se_loss, aux=args.aux,
+                                            nclass=self.nclass,
+                                            se_weight=args.se_weight,
+                                            aux_weight=args.aux_weight)
         self.model, self.optimizer = model, optimizer
         # using cuda
         if args.cuda:
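The criterion now also receives se_weight and aux_weight, so the corresponding command-line flags actually influence the loss instead of falling back to the class defaults. As a hedged sketch of how such a composite segmentation loss is typically combined (an approximation for orientation, not the SegmentationLosses implementation):

    def combined_seg_loss(ce_loss, aux_loss=None, se_loss=None,
                          aux_weight=0.2, se_weight=0.2):
        # ce_loss: main per-pixel cross entropy; the other two terms are optional
        total = ce_loss
        if aux_loss is not None:   # auxiliary head on an intermediate feature map
            total = total + aux_weight * aux_loss
        if se_loss is not None:    # image-level semantic-encoding regularizer
            total = total + se_weight * se_loss
        return total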
@@ -90,6 +87,9 @@ class Trainer():
             self.best_pred = checkpoint['best_pred']
             print("=> loaded checkpoint '{}' (epoch {})"
                   .format(args.resume, checkpoint['epoch']))
+        # clear start epoch if fine-tuning
+        if args.ft:
+            args.start_epoch = 0
         # lr scheduler
         self.scheduler = utils.LR_Scheduler(args.lr_scheduler, args.lr,
                                             args.epochs, len(self.trainloader))
@@ -172,9 +172,9 @@ if __name__ == "__main__":
     args = Options().parse()
     torch.manual_seed(args.seed)
     trainer = Trainer(args)
-    print('Starting Epoch:', args.start_epoch)
-    print('Total Epoches:', args.epochs)
-    for epoch in range(args.start_epoch, args.epochs):
+    print('Starting Epoch:', trainer.args.start_epoch)
+    print('Total Epoches:', trainer.args.epochs)
+    for epoch in range(trainer.args.start_epoch, trainer.args.epochs):
         trainer.training(epoch)
-        if not args.no_val:
+        if not trainer.args.no_val:
             trainer.validation(epoch)
"""Prepare ADE20K dataset"""
import os
import shutil
import argparse
import zipfile
from encoding.utils import download, mkdir, check_sha1
_TARGET_DIR = os.path.expanduser('~/.encoding/data')
def parse_args():
parser = argparse.ArgumentParser(
description='Initialize ADE20K dataset.',
epilog='Example: python prepare_cityscapes.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', default=None, help='dataset directory on disk')
args = parser.parse_args()
return args
def download_city(path, overwrite=False):
    _CITY_DOWNLOAD_URLS = [
        # ('gtCoarse.zip', '61f23198bfff5286e0d7e316ad5c4dbbaaf4717a'),
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        # the archives must be placed in download_dir manually (Cityscapes requires
        # registration); resolve the full path before verifying and extracting
        filename = os.path.join(download_dir, filename)
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. '
                              'The repo may be outdated or download may be incomplete. '
                              'If the "repo_url" is overridden, consider switching to '
                              'the default repo.'.format(filename))
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)
if __name__ == '__main__':
args = parse_args()
mkdir(os.path.expanduser('~/.encoding/data'))
mkdir(os.path.expanduser('~/.encoding/data/cityscapes'))
if args.download_dir is not None:
if os.path.isdir(_TARGET_DIR):
os.remove(_TARGET_DIR)
# make symlink
os.symlink(args.download_dir, _TARGET_DIR)
else:
download_city(_TARGET_DIR, overwrite=False)
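Note that nothing is downloaded by this script: the two archives listed in _CITY_DOWNLOAD_URLS are expected to already sit in the downloads/ folder, and check_sha1 only verifies them before extraction. A minimal sketch of what such a verifier does (the real encoding.utils.check_sha1 may differ in details):

    import hashlib

    def check_sha1_sketch(filename, sha1_hash):
        # stream the file and compare its SHA-1 digest with the expected hash
        sha1 = hashlib.sha1()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                sha1.update(chunk)
        return sha1.hexdigest() == sha1_hash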
@@ -24,10 +24,10 @@ def download_coco(path, overwrite=False):
          '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
         ('http://images.cocodataset.org/zips/val2017.zip',
          '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
-        ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
-         'e7aa0f7515c07e23873a9f71d9095b06bcea3e12'),
-        ('http://images.cocodataset.org/zips/test2017.zip',
-         '99813c02442f3c112d491ea6f30cecf421d0e6b3'),
+        #('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
+        # '46cdcf715b6b4f67e980b529534e79c2edffe084'),
+        #('http://images.cocodataset.org/zips/test2017.zip',
+        # '99813c02442f3c112d491ea6f30cecf421d0e6b3'),
     ]
     mkdir(path)
     for url, checksum in _DOWNLOAD_URLS:
...
import os
import shutil
import argparse
import tarfile
from encoding.utils import download, mkdir
_TARGET_DIR = os.path.expanduser('~/.encoding/data')
def parse_args():
parser = argparse.ArgumentParser(
description='Initialize MINC dataset.',
epilog='Example: python prepare_minc.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', type=str, default=None, help='dataset directory on disk')
parser.add_argument('--no-download', action='store_true', help='disable automatic download if set')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
args = parser.parse_args()
return args
def download_minc(path, overwrite=False):
_AUG_DOWNLOAD_URLS = [
('http://opensurfaces.cs.cornell.edu/static/minc/minc-2500.tar.gz', 'bcccbb3b1ab396ef540f024a5ba23eff54f7fe31')]
download_dir = os.path.join(path, 'downloads')
mkdir(download_dir)
for url, checksum in _AUG_DOWNLOAD_URLS:
filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
# extract
with tarfile.open(filename) as tar:
tar.extractall(path=path)
if __name__ == '__main__':
args = parse_args()
mkdir(os.path.expanduser('~/.encoding/datasets'))
if args.download_dir is not None:
if os.path.isdir(_TARGET_DIR):
os.remove(_TARGET_DIR)
os.symlink(args.download_dir, _TARGET_DIR)
else:
        download_minc(_TARGET_DIR, overwrite=args.overwrite)
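Unlike the Cityscapes script, this one fetches the MINC-2500 archive itself through encoding.utils.download, passing the expected SHA-1 as sha1_hash so the helper can verify the file, and then extracts it under ~/.encoding/data. Passing --download-dir instead symlinks an existing copy of the dataset to that target directory, and --overwrite forces a re-download of a possibly corrupted archive.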
@@ -18,7 +18,7 @@ import setuptools.command.install
 cwd = os.path.dirname(os.path.abspath(__file__))
-version = '0.4.5'
+version = '0.5.0'
 try:
     sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                   cwd=cwd).decode('ascii').strip()
...
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
import torch
from torch.autograd import Variable, gradcheck
import encoding
EPS = 1e-3
ATOL = 1e-3
def _assert_tensor_close(a, b, atol=ATOL, rtol=EPS):
npa, npb = a.cpu().numpy(), b.cpu().numpy()
assert np.allclose(npa, npb, rtol=rtol, atol=atol), \
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
def test_aggregate():
B,N,K,D = 2,3,4,5
A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5),
requires_grad=True)
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
input = (A, X, C)
test = gradcheck(encoding.functions.aggregate, input, eps=EPS, atol=ATOL)
print('Testing aggregate(): {}'.format(test))
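For readers unfamiliar with the CUDA kernel being gradient-checked here: encoding.functions.aggregate implements the encoding-layer aggregation, roughly E[b,k] = sum_n A[b,n,k] * (X[b,n] - C[k]). A hedged pure-PyTorch reference (a sketch for comparison, not the library implementation):

    def py_aggregate(A, X, C):
        # A: (B, N, K) assignment weights, X: (B, N, D) features, C: (K, D) codewords
        B, N, D = X.size()
        K = C.size(0)
        R = X.view(B, N, 1, D) - C.view(1, 1, K, D)   # residuals x_i - c_k, shape (B, N, K, D)
        return (A.unsqueeze(3) * R).sum(1)            # weighted sum over N -> (B, K, D)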
def test_scaled_l2():
B,N,K,D = 2,3,4,5
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
requires_grad=True)
input = (X, C, S)
test = gradcheck(encoding.functions.scaled_l2, input, eps=EPS, atol=ATOL)
print('Testing scaled_l2(): {}'.format(test))
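Similarly, scaled_l2 presumably computes the per-codeword scaled squared distances s_k * ||x_i - c_k||^2 that feed the soft-assignment weights. A sketch of an equivalent computation in plain PyTorch (for orientation only):

    def py_scaled_l2(X, C, S):
        # X: (B, N, D), C: (K, D), S: (K,) per-codeword scaling factors
        B, N, D = X.size()
        K = C.size(0)
        R = X.view(B, N, 1, D) - C.view(1, 1, K, D)   # (B, N, K, D)
        return S.view(1, 1, K) * R.pow(2).sum(3)      # (B, N, K)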
def test_aggregate_v2():
def py_aggregate_v2(A, X, C, STD, S):
B, N, D = X.size()
K = C.size(0)
#e_{k} = \sum_{i=1}^{N} a_{ik} (x_i - d_k) / \sigma_k
R = (X.view(B, N, 1, D).expand(B, N, K, D) - \
C.view(1, 1, K, D).expand(B, N, K, D)) / STD.view(1, 1, K, D)
#E = 1.0 / torch.sqrt(S + 1e-5).unsqueeze(0).unsqueeze(2) * (A.unsqueeze(3) * R).sum(1)
E2 = (A.unsqueeze(3) * R).sum(1)
return E2
B,N,K,D = 2,3,4,5
A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5),
requires_grad=True)
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
STD = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
requires_grad=True)
A2 = torch.from_numpy(A.detach().cpu().numpy()).cuda()
X2 = torch.from_numpy(X.detach().cpu().numpy()).cuda()
C2 = torch.from_numpy(C.detach().cpu().numpy()).cuda()
STD2 = torch.from_numpy(STD.detach().cpu().numpy()).cuda()
S2 = torch.from_numpy(S.detach().cpu().numpy()).cuda()
A2.requires_grad_()
X2.requires_grad_()
C2.requires_grad_()
STD2.requires_grad_()
S2.requires_grad_()
E = encoding.functions.aggregate_v2(A, X, C, STD)
E2 = py_aggregate_v2(A2, X2, C2, STD2, S2)
_assert_tensor_close(E.detach(), E2.detach())
input = (A, X, C, STD)
test = gradcheck(encoding.functions.aggregate_v2, input, eps=EPS, atol=ATOL)
print('Testing aggregate_v2(): {}'.format(test))
def test_encoding_dist():
def mahalanobis_dist(X, C):
B, N, D = X.size()
K = C.size(0)
# X \in BxNxD, C \in KxD
R = X.view(B, N, 1, D).expand(B, N, K, D) - \
C.view(1, 1, K, D).expand(B, N, K, D)
STD = torch.sqrt(R.pow(2).mean(0).mean(0) + 1e-6)
KD = (R / STD.view(1,1,K,D)).pow(2).sum(3)
return KD, STD
B,N,K,D = 2,3,4,5
RVar = torch.cuda.DoubleTensor(K,D).zero_()
X = torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5)
C = torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5)
X.requires_grad_()
C.requires_grad_()
X2 = torch.from_numpy(X.detach().cpu().numpy()).cuda()
C2 = torch.from_numpy(C.detach().cpu().numpy()).cuda()
X2.requires_grad_()
C2.requires_grad_()
# assert numeric correctness
KD, STD, Var_ = encoding.functions.encoding_dist(X, C, 1e-6)
KD2, STD2 = mahalanobis_dist(X2, C2)
_assert_tensor_close(STD.detach(), STD2.detach())
_assert_tensor_close(KD.detach(), KD2.detach())
# check backward
loss1 = KD.pow(2).sum() + STD.sum()
loss1.backward()
loss2 = KD2.pow(2).sum() + STD2.sum()
loss2.backward()
_assert_tensor_close(X.grad.detach(), X2.grad.detach())
_assert_tensor_close(C.grad.detach(), C2.grad.detach())
input = (X, C, 1e-6)
test = gradcheck(encoding.functions.encoding_dist, input, eps=EPS, atol=ATOL)
print('Testing encoding_dist(): {}'.format(test))
def test_encoding_dist_inference():
def mahalanobis_dist(X, C, STD):
B, N, D = X.size()
K = C.size(0)
# X \in BxNxD, C \in KxD
R = X.view(B, N, 1, D).expand(B, N, K, D) - \
C.view(1, 1, K, D).expand(B, N, K, D)
#STD = torch.sqrt(R.pow(2).mean(0).mean(0) + 1e-6)
KD = (R / STD.view(1,1,K,D)).pow(2).sum(3)
return KD
B,N,K,D = 2,3,4,5
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
STD = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
X2 = torch.from_numpy(X.detach().cpu().numpy()).cuda()
C2 = torch.from_numpy(C.detach().cpu().numpy()).cuda()
STD2 = torch.from_numpy(STD.detach().cpu().numpy()).cuda()
X2.requires_grad_()
C2.requires_grad_()
STD2.requires_grad_()
E = encoding.functions.encoding_dist_inference(X, C, STD)
E2 = mahalanobis_dist(X2, C2, STD2)
loss1 = E.pow(2).sum()
loss2 = E2.pow(2).sum()
loss1.backward()
loss2.backward()
print('X.grad', X.grad)
print('X2.grad', X2.grad)
_assert_tensor_close(E.detach(), E2.detach())
_assert_tensor_close(X.grad.detach(), X2.grad.detach())
_assert_tensor_close(C.grad.detach(), C2.grad.detach())
_assert_tensor_close(STD.grad.detach(), STD2.grad.detach())
input = (X, C, STD)
test = gradcheck(encoding.functions.encoding_dist_inference, input, eps=EPS, atol=ATOL)
print('Testing encoding_dist_inference(): {}'.format(test))
def test_sum_square():
B,C,H = 2,3,4
X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5,0.5),
requires_grad=True)
input = (X,)
test = gradcheck(encoding.functions.sum_square, input, eps=EPS, atol=ATOL)
print('Testing sum_square(): {}'.format(test))
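sum_square is the reduction used by synchronized BatchNorm: for an input of shape (B, C, H) it presumably returns the per-channel sum and sum of squares over the batch and spatial dimensions, from which the global mean and variance are later assembled. A hedged reference sketch:

    def py_sum_square(X):
        # X: (B, C, H); reduce over batch (0) and spatial (2) dims, keep channels
        xsum = X.sum(dim=(0, 2))           # (C,)
        xsqsum = X.pow(2).sum(dim=(0, 2))  # (C,)
        return xsum, xsqsum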
def test_syncbn_func():
# generate input
B, C, H = 2, 3, 4
X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5, 0.5),
requires_grad=True)
gamma = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
beta = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
mean = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
std = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
N = B * H
inputs = (X, mean, std, gamma, beta)
# grad check
test = gradcheck(encoding.functions.batchnormtrain, inputs, eps=EPS, atol=ATOL)
print('Testing batchnorm(): {}'.format(test))
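batchnormtrain takes externally computed statistics (the inputs tuple above suggests an argument order of (x, mean, std, gamma, beta)) and applies the usual per-channel affine normalization. A hedged sketch of the expected forward computation, not the library kernel:

    def py_batchnorm(X, mean, std, gamma, beta):
        # X: (B, C, H); statistics and affine parameters are per-channel (C,)
        x_hat = (X - mean.view(1, -1, 1)) / std.view(1, -1, 1)
        return gamma.view(1, -1, 1) * x_hat + beta.view(1, -1, 1)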
def test_non_max_suppression():
def _test_nms(cuda):
# check a small test case
boxes = torch.Tensor([
[[10.2, 23., 50., 20.],
[11.3, 23., 52., 20.1],
[23.2, 102.3, 23.3, 50.3],
[101.2, 32.4, 70.6, 70.],
[100.2, 30.9, 70.7, 69.]],
[[200.3, 234., 530., 320.],
[110.3, 223., 152., 420.1],
[243.2, 240.3, 50.3, 30.3],
[243.2, 236.4, 48.6, 30.],
[100.2, 310.9, 170.7, 691.]]])
scores = torch.Tensor([
[0.9, 0.7, 0.11, 0.23, 0.8],
[0.13, 0.89, 0.45, 0.23, 0.3]])
if cuda:
boxes = boxes.cuda()
scores = scores.cuda()
expected_output = (
torch.ByteTensor(
[[1, 1, 0, 0, 1], [1, 1, 1, 0, 1]]),
torch.LongTensor(
[[0, 4, 1, 3, 2], [1, 2, 4, 3, 0]])
)
mask, inds = encoding.functions.NonMaxSuppression(boxes, scores, 0.7)
_assert_tensor_close(mask, expected_output[0])
_assert_tensor_close(inds, expected_output[1])
_test_nms(False)
_test_nms(True)
if __name__ == '__main__':
import nose
nose.runmodule()
...
@@ -9,10 +9,8 @@
 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 import numpy as np
 import torch
 from torch.autograd import Variable, gradcheck
 import encoding
 EPS = 1e-3
@@ -24,32 +22,6 @@ def _assert_tensor_close(a, b, atol=ATOL, rtol=EPS):
         'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
             a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
-def test_aggregate():
-    B,N,K,D = 2,3,4,5
-    A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    input = (A, X, C)
-    test = gradcheck(encoding.functions.aggregate, input, eps=EPS, atol=ATOL)
-    print('Testing aggregate(): {}'.format(test))
-def test_scaledL2():
-    B,N,K,D = 2,3,4,5
-    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    input = (X, C, S)
-    test = gradcheck(encoding.functions.scaledL2, input, eps=EPS, atol=ATOL)
-    print('Testing scaledL2(): {}'.format(test))
 def test_encoding():
     B,C,H,W,K = 2,3,4,5,6
     X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5),
@@ -59,16 +31,6 @@ def test_encoding():
     test = gradcheck(layer, input, eps=EPS, atol=ATOL)
     print('Testing encoding(): {}'.format(test))
-def test_sum_square():
-    B,C,H = 2,3,4
-    X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    input = (X,)
-    test = gradcheck(encoding.functions.sum_square, input, eps=EPS, atol=ATOL)
-    print('Testing sum_square(): {}'.format(test))
 def test_all_reduce():
     ngpu = torch.cuda.device_count()
     X = [torch.DoubleTensor(2,4,4).uniform_(-0.5,0.5).cuda(i) for i in range(ngpu)]
@@ -82,42 +44,8 @@ def test_all_reduce():
     test = gradcheck(encoding.parallel.allreduce, input, eps=EPS, atol=ATOL)
     print('Testing allreduce(): {}'.format(test))
-def test_syncbn():
-    train_mode=True
-    # generate input
-    B,C,H,W = 8,3,4,5
-    X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5),
-                 requires_grad=True)
-    input = (X,)
-    # SyncBN using DataParallel
-    layer = encoding.nn.BatchNorm2d(C)
-    model = torch.nn.DataParallel(layer).double().cuda()
-    encoding.parallel.patch_replication_callback(model)
-    layer.train(train_mode)
-    # grad check
-    test = gradcheck(model, input, eps=EPS, atol=ATOL)
-    print('Testing BatchNorm2d(): {}'.format(test))
-def test_syncbn_func():
-    # generate input
-    B, C, H = 2, 3, 4
-    X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5, 0.5),
-                 requires_grad=True)
-    gamma = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    beta = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    mean = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    std = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    N = B * H
-    inputs = (X, mean, std, gamma, beta)
-    # grad check
-    test = gradcheck(encoding.functions.batchnormtrain, inputs, eps=EPS, atol=ATOL)
-    print('Testing batchnorm(): {}'.format(test))
 def testSyncBN():
-    def _checkBatchNormResult(bn1, bn2, input, is_train, cuda=False):
+    def _check_batchnorm_result(bn1, bn2, input, is_train, cuda=False):
         def _find_bn(module):
             for m in module.modules():
                 if isinstance(m, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d,
@@ -163,8 +91,8 @@ def testSyncBN():
     # check with unsync version
     for i in range(10):
         print(i)
-        _checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), True, cuda=True)
-        _checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), False, cuda=True)
+        _check_batchnorm_result(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), True, cuda=True)
+        _check_batchnorm_result(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), False, cuda=True)
 if __name__ == '__main__':
     import nose
...
import torch
import numpy as np
from encoding.utils.metrics import *
def test_segmentation_metrics():
# check torch evaluation metrics
rows, cols = 640, 480
nclass = 30
# numpy data
im_lab = np.matrix(np.random.randint(0, nclass, size=(rows, cols)))
mask = np.random.random((nclass, rows, cols))
im_pred = mask.argmax(axis=0)
# torch data
tim_lab = torch.from_numpy(im_lab).unsqueeze(0).long()
tim_pred = torch.from_numpy(mask).unsqueeze(0)
# numpy prediction
pixel_correct, pixel_labeled = pixel_accuracy(im_pred, im_lab)
area_inter, area_union = intersection_and_union(im_pred, im_lab, nclass)
pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
mIoU = IoU.mean()
    print('numpy prediction is:', pixAcc, mIoU)
# torch metric prediction
pixel_correct, pixel_labeled = batch_pix_accuracy(tim_pred, tim_lab)
area_inter, area_union = batch_intersection_union(tim_pred, tim_lab, nclass)
pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
mIoU = IoU.mean()
    print('torch prediction is:', pixAcc, mIoU)