Commit faa48887 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

cuda check

parent 1ce47cb5
...@@ -124,7 +124,9 @@ python VGGplus.py ...@@ -124,7 +124,9 @@ python VGGplus.py
Tested with CUDA 10.0, Ubuntu 18.04, Python 3.6 with [Conda](https://www.anaconda.com/) and PyTorch 1.1. Tested with CUDA 10.0, Ubuntu 18.04, Python 3.6 with [Conda](https://www.anaconda.com/) and PyTorch 1.1.
``` ```
conda install pytorch-nightly -c pytorch # See https://pytorch.org/get-started/locally/ conda install pytorch torchvision cudatoolkit=10.0 -c pytorch # See https://pytorch.org/get-started/locally/
conda install google-sparsehash -c bioconda
conda install -c anaconda pillow
git clone git@github.com:facebookresearch/SparseConvNet.git git clone git@github.com:facebookresearch/SparseConvNet.git
cd SparseConvNet/ cd SparseConvNet/
bash develop.sh bash develop.sh
...@@ -148,6 +150,7 @@ SparseConvNet is BSD licensed, as found in the LICENSE file. ...@@ -148,6 +150,7 @@ SparseConvNet is BSD licensed, as found in the LICENSE file.
8. [Workshop on Learning to See from 3D Data, 2017](https://shapenet.cs.stanford.edu/iccv17workshop/) First place in the [semantic segmentation](https://shapenet.cs.stanford.edu/iccv17/) competition. [Report](https://arxiv.org/pdf/1710.06104) 8. [Workshop on Learning to See from 3D Data, 2017](https://shapenet.cs.stanford.edu/iccv17workshop/) First place in the [semantic segmentation](https://shapenet.cs.stanford.edu/iccv17/) competition. [Report](https://arxiv.org/pdf/1710.06104)
9. [3D Semantic Segmentation with Submanifold Sparse Convolutional Networks, 2017](https://arxiv.org/abs/1711.10275) Semantic segmentation for the ShapeNet Core55 and NYU-DepthV2 datasets, CVPR 2018 9. [3D Semantic Segmentation with Submanifold Sparse Convolutional Networks, 2017](https://arxiv.org/abs/1711.10275) Semantic segmentation for the ShapeNet Core55 and NYU-DepthV2 datasets, CVPR 2018
10. [ScanNet 3D semantic label benchmark 2018](http://kaldir.vc.in.tum.de/scannet_benchmark/semantic_label_3d) 0.726 average IOU. 10. [ScanNet 3D semantic label benchmark 2018](http://kaldir.vc.in.tum.de/scannet_benchmark/semantic_label_3d) 0.726 average IOU.
11. [MinkowskiEngine](https://github.com/StanfordVL/MinkowskiEngine) is an alternative implementation of SparseConvNet.
## Citations ## Citations
......
...@@ -6,8 +6,5 @@ ...@@ -6,8 +6,5 @@
# LICENSE file in the root directory of this source tree. # LICENSE file in the root directory of this source tree.
rm -rf build/ dist/ sparseconvnet.egg-info sparseconvnet_SCN*.so rm -rf build/ dist/ sparseconvnet.egg-info sparseconvnet_SCN*.so
conda install google-sparsehash -c bioconda
conda install -c anaconda pillow
conda install scipy
python setup.py develop python setup.py develop
python examples/hello-world.py python examples/hello-world.py
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
scale=20 #Voxel size = 1/scale scale=20 #Voxel size = 1/scale
val_reps=1 # Number of test views, 1 or more val_reps=1 # Number of test views, 1 or more
batch_size=32 batch_size=32
elastic_deformation=False  # toggle for elastic-distortion data augmentation during training — presumably consumed by trainMerge's elastic() calls; TODO confirm against full script
import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp
...@@ -19,11 +20,11 @@ VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 2 ...@@ -19,11 +20,11 @@ VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 2
train,val=[],[] train,val=[],[]
for x in torch.utils.data.DataLoader( for x in torch.utils.data.DataLoader(
glob.glob('train/*.pth'), glob.glob('train/*.pth')[::10],
collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()): collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
train.append(x) train.append(x)
for x in torch.utils.data.DataLoader( for x in torch.utils.data.DataLoader(
glob.glob('val/*.pth'), glob.glob('val/*.pth')[::10],
collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()): collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
val.append(x) val.append(x)
print('Training examples:', len(train)) print('Training examples:', len(train))
...@@ -60,8 +61,9 @@ def trainMerge(tbl): ...@@ -60,8 +61,9 @@ def trainMerge(tbl):
theta=np.random.rand()*2*math.pi theta=np.random.rand()*2*math.pi
m=np.matmul(m,[[math.cos(theta),math.sin(theta),0],[-math.sin(theta),math.cos(theta),0],[0,0,1]]) m=np.matmul(m,[[math.cos(theta),math.sin(theta),0],[-math.sin(theta),math.cos(theta),0],[0,0,1]])
a=np.matmul(a,m) a=np.matmul(a,m)
a=elastic(a,6*scale//50,40*scale/50) if elastic_deformation:
a=elastic(a,20*scale//50,160*scale/50) a=elastic(a,6*scale//50,40*scale/50)
a=elastic(a,20*scale//50,160*scale/50)
m=a.min(0) m=a.min(0)
M=a.max(0) M=a.max(0)
q=M-m q=M-m
......
...@@ -8,6 +8,9 @@ import torch, os ...@@ -8,6 +8,9 @@ import torch, os
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from setuptools import setup, find_packages from setuptools import setup, find_packages
# Sanity-check the CUDA toolkit before building: multiplying a tall all-ones
# matrix by a small all-ones matrix must yield entries all equal to 2. On the
# toolkit version this guards against, large matmuls produce wrong values, so a
# failing minimum flags the broken install. Skipped entirely when no GPU is
# visible.
if torch.cuda.is_available():
    lhs = torch.ones(2097153, 2).cuda()
    rhs = torch.ones(2, 2).cuda()
    product = torch.matmul(lhs, rhs)
    assert product.min().item() == 2, 'Please upgrade from CUDA 9.0 to CUDA 10.0+'
this_dir = os.path.dirname(os.path.realpath(__file__)) this_dir = os.path.dirname(os.path.realpath(__file__))
torch_dir = os.path.dirname(torch.__file__) torch_dir = os.path.dirname(torch.__file__)
conda_include_dir = '/'.join(torch_dir.split('/')[:-4]) + '/include' conda_include_dir = '/'.join(torch_dir.split('/')[:-4]) + '/include'
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment