Commit d7f704c5 authored by Alexander Liao's avatar Alexander Liao
Browse files

Fixed C++ compiler warnings and Python flake8 style issues

parent 1111319d
...@@ -15,8 +15,8 @@ torch::Tensor radius_cpu(torch::Tensor query, torch::Tensor support, ...@@ -15,8 +15,8 @@ torch::Tensor radius_cpu(torch::Tensor query, torch::Tensor support,
AT_DISPATCH_ALL_TYPES(query.scalar_type(), "radius_cpu", [&] { AT_DISPATCH_ALL_TYPES(query.scalar_type(), "radius_cpu", [&] {
auto data_q = query.DATA_PTR<scalar_t>(); auto data_q = query.data_ptr<scalar_t>();
auto data_s = support.DATA_PTR<scalar_t>(); auto data_s = support.data_ptr<scalar_t>();
std::vector<scalar_t> queries_stl = std::vector<scalar_t>(data_q, std::vector<scalar_t> queries_stl = std::vector<scalar_t>(data_q,
data_q + query.size(0)*query.size(1)); data_q + query.size(0)*query.size(1));
std::vector<scalar_t> supports_stl = std::vector<scalar_t>(data_s, std::vector<scalar_t> supports_stl = std::vector<scalar_t>(data_s,
...@@ -34,13 +34,7 @@ torch::Tensor radius_cpu(torch::Tensor query, torch::Tensor support, ...@@ -34,13 +34,7 @@ torch::Tensor radius_cpu(torch::Tensor query, torch::Tensor support,
out = torch::from_blob(neighbors_indices_ptr, {tsize, 2}, options=options); out = torch::from_blob(neighbors_indices_ptr, {tsize, 2}, options=options);
out = out.t(); out = out.t();
auto result = torch::zeros_like(out); return out.clone();
auto index = torch::tensor({0,1});
result.index_copy_(0, index, out);
return result;
} }
...@@ -49,7 +43,7 @@ void get_size_batch(const vector<long>& batch, vector<long>& res){ ...@@ -49,7 +43,7 @@ void get_size_batch(const vector<long>& batch, vector<long>& res){
res.resize(batch[batch.size()-1]-batch[0]+1, 0); res.resize(batch[batch.size()-1]-batch[0]+1, 0);
long ind = batch[0]; long ind = batch[0];
long incr = 1; long incr = 1;
for(int i=1; i < batch.size(); i++){ for(unsigned long i=1; i < batch.size(); i++){
if(batch[i] == ind) if(batch[i] == ind)
incr++; incr++;
...@@ -81,8 +75,7 @@ torch::Tensor batch_radius_cpu(torch::Tensor query, ...@@ -81,8 +75,7 @@ torch::Tensor batch_radius_cpu(torch::Tensor query,
auto options = torch::TensorOptions().dtype(torch::kLong).device(torch::kCPU); auto options = torch::TensorOptions().dtype(torch::kLong).device(torch::kCPU);
int max_count = 0; int max_count = 0;
AT_DISPATCH_ALL_TYPES(query.scalar_type(), "batch_radius_cpu", [&] {
AT_DISPATCH_ALL_TYPES(query.scalar_type(), "batch_radius_search", [&] {
auto data_q = query.data_ptr<scalar_t>(); auto data_q = query.data_ptr<scalar_t>();
auto data_s = support.data_ptr<scalar_t>(); auto data_s = support.data_ptr<scalar_t>();
std::vector<scalar_t> queries_stl = std::vector<scalar_t>(data_q, std::vector<scalar_t> queries_stl = std::vector<scalar_t>(data_q,
......
...@@ -127,20 +127,18 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries, ...@@ -127,20 +127,18 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries,
// Initiate variables // Initiate variables
// ****************** // ******************
// indices // indices
int i0 = 0; size_t i0 = 0;
// Square radius // Square radius
const scalar_t r2 = static_cast<scalar_t>(radius*radius); const scalar_t r2 = static_cast<scalar_t>(radius*radius);
// Counting vector // Counting vector
int max_count = 0; size_t max_count = 0;
float d2;
// batch index // batch index
long b = 0; size_t b = 0;
long sum_qb = 0; size_t sum_qb = 0;
long sum_sb = 0; size_t sum_sb = 0;
float eps = 0.000001; float eps = 0.000001;
// Nanoflann related variables // Nanoflann related variables
...@@ -173,16 +171,9 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries, ...@@ -173,16 +171,9 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries,
for (auto& p0 : query_pcd.pts){ for (auto& p0 : query_pcd.pts){
// Check if we changed batch // Check if we changed batch
scalar_t query_pt[dim]; scalar_t* query_pt = new scalar_t[dim];
std::copy(p0.begin(), p0.end(), query_pt); std::copy(p0.begin(), p0.end(), query_pt);
/*
std::cout << "\n ========== \n";
for(int i=0; i < dim; i++)
std::cout << query_pt[i] << '\n';
std::cout << "\n ========== \n";
*/
if (i0 == sum_qb + q_batches[b]){ if (i0 == sum_qb + q_batches[b]){
sum_qb += q_batches[b]; sum_qb += q_batches[b];
sum_sb += s_batches[b]; sum_sb += s_batches[b];
...@@ -218,7 +209,7 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries, ...@@ -218,7 +209,7 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries,
} }
// Reserve the memory // Reserve the memory
int size = 0; // total number of edges size_t size = 0; // total number of edges
for (auto& inds_dists : all_inds_dists){ for (auto& inds_dists : all_inds_dists){
if(inds_dists.size() <= max_count) if(inds_dists.size() <= max_count)
size += inds_dists.size(); size += inds_dists.size();
...@@ -230,14 +221,14 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries, ...@@ -230,14 +221,14 @@ int batch_nanoflann_neighbors (vector<scalar_t>& queries,
sum_sb = 0; sum_sb = 0;
sum_qb = 0; sum_qb = 0;
b = 0; b = 0;
int u = 0; size_t u = 0;
for (auto& inds_dists : all_inds_dists){ for (auto& inds_dists : all_inds_dists){
if (i0 == sum_qb + q_batches[b]){ if (i0 == sum_qb + q_batches[b]){
sum_qb += q_batches[b]; sum_qb += q_batches[b];
sum_sb += s_batches[b]; sum_sb += s_batches[b];
b++; b++;
} }
for (int j = 0; j < max_count; j++){ for (size_t j = 0; j < max_count; j++){
if (j < inds_dists.size()){ if (j < inds_dists.size()){
neighbors_indices[u] = inds_dists[j].first + sum_sb; neighbors_indices[u] = inds_dists[j].first + sum_sb;
neighbors_indices[u + 1] = i0; neighbors_indices[u + 1] = i0;
......
This diff is collapsed.
from typing import Optional from typing import Optional
import torch import torch
import scipy
def sample(col, count):
if col.size(0) > count:
col = col[torch.randperm(col.size(0))][:count]
return col
def radius(x: torch.Tensor, y: torch.Tensor, r: float, def radius(x: torch.Tensor, y: torch.Tensor, r: float,
batch_x: Optional[torch.Tensor] = None, batch_x: Optional[torch.Tensor] = None,
...@@ -55,7 +50,7 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float, ...@@ -55,7 +50,7 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float,
ptr_x = deg.new_zeros(batch_size + 1) ptr_x = deg.new_zeros(batch_size + 1)
torch.cumsum(deg, 0, out=ptr_x[1:]) torch.cumsum(deg, 0, out=ptr_x[1:])
else: else:
ptr_x = None#torch.tensor([0, x.size(0)], device=x.device) ptr_x = None
if batch_y is not None: if batch_y is not None:
assert y.size(0) == batch_y.numel() assert y.size(0) == batch_y.numel()
...@@ -66,19 +61,11 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float, ...@@ -66,19 +61,11 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float,
ptr_y = deg.new_zeros(batch_size + 1) ptr_y = deg.new_zeros(batch_size + 1)
torch.cumsum(deg, 0, out=ptr_y[1:]) torch.cumsum(deg, 0, out=ptr_y[1:])
else: else:
ptr_y = None#torch.tensor([0, y.size(0)], device=y.device) ptr_y = None
result = torch.ops.torch_cluster.radius(x, y, ptr_x, ptr_y, r, result = torch.ops.torch_cluster.radius(x, y, ptr_x, ptr_y, r,
max_num_neighbors) max_num_neighbors)
else: else:
#if batch_x is None:
# batch_x = x.new_zeros(x.size(0), dtype=torch.long)
#if batch_y is None:
# batch_y = y.new_zeros(y.size(0), dtype=torch.long)
#batch_x = batch_x.to(x.dtype)
#batch_y = batch_y.to(y.dtype)
assert x.dim() == 2 assert x.dim() == 2
if batch_x is not None: if batch_x is not None:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment