Unverified commit 17be9e16, authored by Hang Zhang, committed by GitHub

fix miscs (#258)

parent b872eb8c
@@ -11,7 +11,6 @@ import time
 import argparse
 import numpy as np
 from tqdm import tqdm
-#from mpi4py import MPI
 import torch
 from torch.utils import data
@@ -139,21 +138,15 @@ class Options():
         print(args)
         return args

-#def mpi_avg_all(*args):
-#    comm = MPI.COMM_WORLD
-#    # send to master
-#    sum_args = []
-#    for arg in args:
-#        sum_args.append(sum(comm.gather(arg, root=0)))
-#    sum_args = [item / len(args) for item in sum_args]
-#    return tuple(sum_args)
-def torch_dist_avg(*args):
+def torch_dist_avg(gpu, *args):
     process_group = torch.distributed.group.WORLD
     tensor_args = []
     pending_res = []
     for arg in args:
-        tensor_arg = torch.tensor(arg)
+        if isinstance(arg, torch.Tensor):
+            tensor_arg = arg.clone().reshape(1).detach().cuda(gpu)
+        else:
+            tensor_arg = torch.tensor(arg).reshape(1).cuda(gpu)
         tensor_args.append(tensor_arg)
         pending_res.append(torch.distributed.all_reduce(tensor_arg, group=process_group, async_op=True))
     for res in pending_res:
@@ -292,7 +285,7 @@ def main_worker(gpu, ngpus_per_node, args):
             print('pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))
         if args.gpu == 0:
-            pixAcc, mIoU = torch_dist_avg(pixAcc, mIoU)
+            pixAcc, mIoU = torch_dist_avg(args.gpu, pixAcc, mIoU)
             print('pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))
             new_pred = (pixAcc + mIoU)/2
...
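The helper above sums each metric across processes with all_reduce; an average additionally divides by the number of processes. The following is a minimal standalone sketch of that pattern for plain scalar metrics, written for illustration rather than taken from the repository; it assumes torch.distributed has already been initialized (for example via init_process_group) and that each process drives one GPU identified by `gpu`.

import torch
import torch.distributed as dist

def dist_avg_scalars(gpu, *values):
    # Sketch only: average plain Python scalars across all ranks.
    # Assumes dist.init_process_group(...) has already been called.
    world_size = dist.get_world_size()
    tensors = [torch.tensor(float(v), device='cuda:{}'.format(gpu)) for v in values]
    handles = [dist.all_reduce(t, op=dist.ReduceOp.SUM, async_op=True) for t in tensors]
    for h in handles:
        h.wait()  # block until every asynchronous reduction has finished
    return tuple((t / world_size).item() for t in tensors)

# Hypothetical usage inside each worker process:
# pixAcc, mIoU = dist_avg_scalars(args.gpu, pixAcc, mIoU)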
@@ -13,8 +13,6 @@ import os
 import subprocess
 from setuptools import setup, find_packages
-import setuptools.command.develop
-import setuptools.command.install

 cwd = os.path.dirname(os.path.abspath(__file__))
@@ -35,19 +33,6 @@ def create_version_file():
         f.write('"""This is encoding version file."""\n')
         f.write("__version__ = '{}'\n".format(version))

-# run test scrip after installation
-class install(setuptools.command.install.install):
-    def run(self):
-        create_version_file()
-        setuptools.command.install.install.run(self)
-
-class develop(setuptools.command.develop.develop):
-    def run(self):
-        create_version_file()
-        setuptools.command.develop.develop.run(self)
-
-readme = open('README.md').read()
-
 requirements = [
     'numpy',
     'tqdm',
@@ -60,28 +45,26 @@ requirements = [
     'requests',
 ]

-setup(
-    name="torch-encoding",
-    version=version,
-    author="Hang Zhang",
-    author_email="zhanghang0704@gmail.com",
-    url="https://github.com/zhanghang1989/PyTorch-Encoding",
-    description="PyTorch Encoding Package",
-    long_description=readme,
-    long_description_content_type='text/markdown',
-    license='MIT',
-    install_requires=requirements,
-    packages=find_packages(exclude=["tests", "experiments"]),
-    package_data={ 'encoding': [
-        'LICENSE',
-        'lib/cpu/*.h',
-        'lib/cpu/*.cpp',
-        'lib/gpu/*.h',
-        'lib/gpu/*.cpp',
-        'lib/gpu/*.cu',
-    ]},
-    cmdclass={
-        'install': install,
-        'develop': develop,
-    },
-)
+if __name__ == '__main__':
+    create_version_file()
+    setup(
+        name="torch-encoding",
+        version=version,
+        author="Hang Zhang",
+        author_email="zhanghang0704@gmail.com",
+        url="https://github.com/zhanghang1989/PyTorch-Encoding",
+        description="PyTorch Encoding Package",
+        long_description=open('README.md').read(),
+        long_description_content_type='text/markdown',
+        license='MIT',
+        install_requires=requirements,
+        packages=find_packages(exclude=["tests", "experiments"]),
+        package_data={ 'encoding': [
+            'LICENSE',
+            'lib/cpu/*.h',
+            'lib/cpu/*.cpp',
+            'lib/gpu/*.h',
+            'lib/gpu/*.cpp',
+            'lib/gpu/*.cu',
+        ]},
+    )
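With this layout the version file is generated whenever setup.py is executed directly, so the custom install/develop command classes are no longer needed. For orientation only, here is a hypothetical minimal shape of such a create_version_file() helper, reconstructed from the two f.write() calls visible in the hunk above; the output path and the way `version` is obtained are assumptions, not part of this commit.

import os

cwd = os.path.dirname(os.path.abspath(__file__))
version = '0.0.0'  # placeholder; the real version string is computed elsewhere in setup.py

def create_version_file():
    # Assumed location of the generated file; the actual path is defined in the repository.
    version_path = os.path.join(cwd, 'encoding', 'version.py')
    with open(version_path, 'w') as f:
        f.write('"""This is encoding version file."""\n')
        f.write("__version__ = '{}'\n".format(version))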
-import argparse
+##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+## Created by: Hang Zhang
+## ECE Department, Rutgers University
+## Email: zhang.hang@rutgers.edu
+## Copyright (c) 2017
+##
+## This source code is licensed under the MIT-style license found in the
+## LICENSE file in the root directory of this source tree
+##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 from tqdm import tqdm
 from torch.utils import data
 import torchvision.transforms as transform
 from encoding.datasets import get_segmentation_dataset

-def main():
-    parser = argparse.ArgumentParser(description='Test Dataset.')
-    parser.add_argument('--dataset', type=str, default='ade20k',
-                        help='dataset name (default: pascal12)')
-    args = parser.parse_args()
-    input_transform = transform.Compose([
-        transform.ToTensor(),
-        transform.Normalize([.485, .456, .406], [.229, .224, .225])])
-    trainset = get_segmentation_dataset(args.dataset, split='val', mode='train',
-                                        transform=input_transform)
-    trainloader = data.DataLoader(trainset, batch_size=16,
-                                  drop_last=True, shuffle=True)
-    tbar = tqdm(trainloader)
-    max_label = -10
-    for i, (image, target) in enumerate(tbar):
-        tmax = target.max().item()
-        tmin = target.min().item()
-        assert(tmin >= -1)
-        if tmax > max_label:
-            max_label = tmax
-        assert(max_label < trainset.NUM_CLASS)
-        tbar.set_description("Batch %d, max label %d"%(i, max_label))
+def test_ade_dataset():
+    def test_dataset(dataset_name):
+        input_transform = transform.Compose([
+            transform.ToTensor(),
+            transform.Normalize([.485, .456, .406], [.229, .224, .225])])
+        trainset = get_segmentation_dataset(dataset_name, split='val', mode='train',
+                                            transform=input_transform)
+        trainloader = data.DataLoader(trainset, batch_size=16,
+                                      drop_last=True, shuffle=True)
+        tbar = tqdm(trainloader)
+        max_label = -10
+        for i, (image, target) in enumerate(tbar):
+            tmax = target.max().item()
+            tmin = target.min().item()
+            assert(tmin >= -1)
+            if tmax > max_label:
+                max_label = tmax
+            assert(max_label < trainset.NUM_CLASS)
+            tbar.set_description("Batch %d, max label %d"%(i, max_label))
+    test_dataset('ade20k')

 if __name__ == "__main__":
-    main()
+    import nose
+    nose.runmodule()
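The per-batch assertions above simply check that every target label stays in the valid range. A tiny self-contained illustration of that check on a synthetic tensor (NUM_CLASS is a placeholder here; ADE20K itself uses 150 classes, and -1 is presumably the ignore label):

import torch

NUM_CLASS = 150  # placeholder class count for the illustration
# Fake segmentation targets: integer labels in [-1, NUM_CLASS - 1]
target = torch.randint(-1, NUM_CLASS, (16, 480, 480))

tmin, tmax = target.min().item(), target.max().item()
assert tmin >= -1          # nothing below the ignore label
assert tmax < NUM_CLASS    # class indices stay below NUM_CLASS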
@@ -55,21 +55,6 @@ def test_moments():
     test = gradcheck(encoding.functions.moments, input, eps=EPS, atol=ATOL)
     print('Testing moments(): {}'.format(test))

-def test_syncbn_func():
-    # generate input
-    B, C, H = 2, 3, 4
-    X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5, 0.5),
-                 requires_grad=True)
-    gamma = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    beta = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    mean = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    std = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    N = B * H
-    inputs = (X, mean, std, gamma, beta)
-    # grad check
-    test = gradcheck(encoding.functions.batchnormtrain, inputs, eps=EPS, atol=ATOL)
-    print('Testing batchnorm(): {}'.format(test))
-
 def test_non_max_suppression():
     def _test_nms(cuda):
         # check a small test case
...
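Both the retained test_moments() check and the removed sync-BN check rely on torch.autograd.gradcheck with double-precision inputs. A minimal self-contained example of that pattern, using a toy function rather than the package's CUDA ops, with illustrative EPS/ATOL values (the real constants are defined elsewhere in the test file):

import torch
from torch.autograd import gradcheck

EPS, ATOL = 1e-6, 1e-4  # illustrative tolerances, not the test file's constants

def moments_like(x):
    # Toy stand-in returning the mean and (biased) variance over the last dimension
    return x.mean(dim=-1), x.var(dim=-1, unbiased=False)

# gradcheck needs double precision and requires_grad=True on the inputs
x = torch.randn(2, 3, 4, dtype=torch.float64, requires_grad=True)
ok = gradcheck(moments_like, (x,), eps=EPS, atol=ATOL)
print('gradcheck passed:', ok)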
+##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+## Created by: Hang Zhang
+## Email: zhanghang0704@gmail.com
+## Copyright (c) 2020
+##
+## This source code is licensed under the MIT-style license found in the
+## LICENSE file in the root directory of this source tree
+##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+import torch
+import encoding
+
+def test_model_inference():
+    x = torch.rand(1, 3, 224, 224)
+    for model_name in encoding.models.pretrained_model_list():
+        print('Doing: ', model_name)
+        if 'wideresnet' in model_name: continue # need multi-gpu
+        model = encoding.models.get_model(model_name, pretrained=True)
+        model.eval()
+        y = model(x)
+
+if __name__ == "__main__":
+    import nose
+    nose.runmodule()
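For inference-only smoke tests like the one above, it is common to disable autograd and sanity-check the output. Below is a sketch of such a variant for a single model name, reusing the get_model/eval calls shown in the new test; the tuple handling and the batch-size assertion are illustrative additions, not something this commit checks.

import torch
import encoding

def check_inference(model_name):
    # Run one forward pass without building an autograd graph
    model = encoding.models.get_model(model_name, pretrained=True)
    model.eval()
    x = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        y = model(x)
    if isinstance(y, (tuple, list)):  # some models may return several outputs
        y = y[0]
    assert y.shape[0] == 1  # one prediction for the single input image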
@@ -19,11 +19,13 @@ def test_segmentation_metrics():
     pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
     IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
     mIoU = IoU.mean()
-    print('numpy predictionis :',pixAcc, mIoU)
+    print('numpy predictionis :', pixAcc, mIoU)
     # torch metric prediction
     pixel_correct, pixel_labeled = batch_pix_accuracy(tim_pred, tim_lab)
     area_inter, area_union = batch_intersection_union(tim_pred, tim_lab, nclass)
-    pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
-    IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
-    mIoU = IoU.mean()
-    print('torch predictionis :',pixAcc, mIoU)
+    batch_pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
+    IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
+    batch_mIoU = IoU.mean()
+    print('torch predictionis :', batch_pixAcc, batch_mIoU)
+    assert (batch_pixAcc - pixAcc) < 1e-3
+    assert (batch_mIoU - mIoU) < 1e-3
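The two new assertions compare the torch-based batch metrics against the numpy reference computed just above; np.spacing(1) is a tiny epsilon that keeps the ratios finite when a denominator is zero. Toy numbers (made up for illustration) worked through the same two formulas:

import numpy as np

pixel_correct, pixel_labeled = 9000, 10000
area_inter = np.array([400., 300., 0.])  # per-class intersection counts (made up)
area_union = np.array([500., 600., 0.])  # per-class union counts (made up)

pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)  # 0.9
IoU = 1.0 * area_inter / (np.spacing(1) + area_union)           # [0.8, 0.5, 0.0]
mIoU = IoU.mean()                                               # ~0.433
print(pixAcc, mIoU)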