"tools/imglab/git@developer.sourcefind.cn:OpenDAS/dlib.git" did not exist on "48507ab5904d9d11c3c3fb09d002140ed705a898"
Commit de4b66a1 authored by Ruilong Li

renaming

parent 4a4bbaba
@@ -47,7 +47,7 @@ inline __host__ __device__ void _ray_aabb_intersect(
 template <typename scalar_t>
-__global__ void kernel_ray_aabb_intersect(
+__global__ void ray_aabb_intersect_kernel(
     const int N,
     const scalar_t* rays_o,
     const scalar_t* rays_d,
@@ -103,7 +103,7 @@ std::vector<torch::Tensor> ray_aabb_intersect(
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
         rays_o.scalar_type(), "ray_aabb_intersect",
         ([&] {
-            kernel_ray_aabb_intersect<scalar_t><<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
+            ray_aabb_intersect_kernel<scalar_t><<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                 N,
                 rays_o.data_ptr<scalar_t>(),
                 rays_d.data_ptr<scalar_t>(),
...
@@ -14,14 +14,14 @@ std::vector<torch::Tensor> volumetric_rendering_steps(
     torch::Tensor sigmas
 );
-std::vector<torch::Tensor> volumetric_weights_forward(
+std::vector<torch::Tensor> volumetric_rendering_weights_forward(
     torch::Tensor packed_info,
     torch::Tensor starts,
     torch::Tensor ends,
     torch::Tensor sigmas
 );
-torch::Tensor volumetric_weights_backward(
+torch::Tensor volumetric_rendering_weights_backward(
     torch::Tensor weights,
     torch::Tensor grad_weights,
     torch::Tensor packed_info,
@@ -49,6 +49,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
     m.def("ray_aabb_intersect", &ray_aabb_intersect);
     m.def("ray_marching", &ray_marching);
     m.def("volumetric_rendering_steps", &volumetric_rendering_steps);
-    m.def("volumetric_weights_forward", &volumetric_weights_forward);
-    m.def("volumetric_weights_backward", &volumetric_weights_backward);
+    m.def("volumetric_rendering_weights_forward", &volumetric_rendering_weights_forward);
+    m.def("volumetric_rendering_weights_backward", &volumetric_rendering_weights_backward);
 }
\ No newline at end of file
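For context only (not part of this commit): because PYBIND11_MODULE now registers only the new names, every Python-side caller has to switch in the same change. Below is a minimal sketch of the renamed extension surface; _C stands for the compiled torch extension, as in the wrapper code further down in this diff, and the import path is a hypothetical placeholder rather than something taken from the repository.

# Hedged sketch: _C is assumed to be the compiled extension built from the
# C++/CUDA sources above; "mypkg" is a hypothetical package name.
from mypkg import _C

new_names = [
    "volumetric_rendering_weights_forward",
    "volumetric_rendering_weights_backward",
]
old_names = ["volumetric_weights_forward", "volumetric_weights_backward"]
assert all(hasattr(_C, name) for name in new_names)
assert not any(hasattr(_C, name) for name in old_names)  # old bindings are gone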
@@ -259,7 +259,7 @@ std::vector<torch::Tensor> ray_marching(
             occ_binary.data_ptr<bool>(),
             // sampling
             dt,
-            // writable helpers
+            // outputs
             num_steps.data_ptr<int>()
         );
...
@@ -45,7 +45,7 @@ __global__ void volumetric_rendering_steps_kernel(
 template <typename scalar_t>
-__global__ void volumetric_weights_forward_kernel(
+__global__ void volumetric_rendering_weights_forward_kernel(
     const uint32_t n_rays,
     const int* packed_info,  // input ray & point indices.
     const scalar_t* starts,  // input start t
@@ -92,7 +92,7 @@ __global__ void volumetric_weights_forward_kernel(
 template <typename scalar_t>
-__global__ void volumetric_weights_backward_kernel(
+__global__ void volumetric_rendering_weights_backward_kernel(
     const uint32_t n_rays,
     const int* packed_info,  // input ray & point indices.
     const scalar_t* starts,  // input start t
@@ -187,7 +187,7 @@ std::vector<torch::Tensor> volumetric_rendering_steps(
 }
-std::vector<torch::Tensor> volumetric_weights_forward(
+std::vector<torch::Tensor> volumetric_rendering_weights_forward(
     torch::Tensor packed_info,
     torch::Tensor starts,
     torch::Tensor ends,
@@ -217,9 +217,9 @@ std::vector<torch::Tensor> volumetric_weights_forward(
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
         sigmas.scalar_type(),
-        "volumetric_weights_forward",
+        "volumetric_rendering_weights_forward",
         ([&]
-         { volumetric_weights_forward_kernel<scalar_t><<<blocks, threads>>>(
+         { volumetric_rendering_weights_forward_kernel<scalar_t><<<blocks, threads>>>(
               n_rays,
               packed_info.data_ptr<int>(),
               starts.data_ptr<scalar_t>(),
@@ -235,7 +235,7 @@ std::vector<torch::Tensor> volumetric_weights_forward(
 }
-torch::Tensor volumetric_weights_backward(
+torch::Tensor volumetric_rendering_weights_backward(
     torch::Tensor weights,
     torch::Tensor grad_weights,
     torch::Tensor packed_info,
@@ -255,9 +255,9 @@ torch::Tensor volumetric_weights_backward(
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
         sigmas.scalar_type(),
-        "volumetric_weights_backward",
+        "volumetric_rendering_weights_backward",
         ([&]
-         { volumetric_weights_backward_kernel<scalar_t><<<blocks, threads>>>(
+         { volumetric_rendering_weights_backward_kernel<scalar_t><<<blocks, threads>>>(
               n_rays,
               packed_info.data_ptr<int>(),
               starts.data_ptr<scalar_t>(),
...
@@ -32,7 +32,7 @@ def ray_aabb_intersect(
     return t_min, t_max
-def volumetric_weights(
+def volumetric_rendering_weights(
     packed_info: torch.Tensor,
     t_starts: torch.Tensor,
     t_ends: torch.Tensor,
@@ -64,7 +64,7 @@ def volumetric_weights(
         t_starts = t_starts.contiguous()
         t_ends = t_ends.contiguous()
         sigmas = sigmas.contiguous()
-        weights, ray_indices, ray_alive_masks = _volumetric_weights.apply(
+        weights, ray_indices, ray_alive_masks = _volumetric_rendering_weights.apply(
             packed_info, t_starts, t_ends, sigmas
         )
     else:
@@ -120,14 +120,16 @@ def volumetric_accumulate(
     return outputs
-class _volumetric_weights(torch.autograd.Function):
+class _volumetric_rendering_weights(torch.autograd.Function):
     @staticmethod
     def forward(ctx, packed_info, t_starts, t_ends, sigmas):
         (
             weights,
             ray_indices,
             ray_alive_masks,
-        ) = _C.volumetric_weights_forward(packed_info, t_starts, t_ends, sigmas)
+        ) = _C.volumetric_rendering_weights_forward(
+            packed_info, t_starts, t_ends, sigmas
+        )
         ctx.save_for_backward(
             packed_info,
             t_starts,
@@ -146,7 +148,7 @@ class _volumetric_weights(torch.autograd.Function):
             sigmas,
             weights,
         ) = ctx.saved_tensors
-        grad_sigmas = _C.volumetric_weights_backward(
+        grad_sigmas = _C.volumetric_rendering_weights_backward(
             weights,
             grad_weights,
             packed_info,
...
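To make the renamed Python entry point concrete, here is a minimal usage sketch. It is not part of this commit: the package name, tensor shapes, and the packed_info layout are assumptions for illustration; only the function name, its (packed_info, t_starts, t_ends, sigmas) arguments, and the (weights, ray indices, alive-ray mask) return values come from the diff above.

import torch
from mypkg.utils import volumetric_rendering_weights  # hypothetical package name

# Two rays with four samples each; the (first sample index, sample count)
# layout of packed_info is an assumption, not specified in this diff.
packed_info = torch.tensor([[0, 4], [4, 4]], dtype=torch.int32, device="cuda")
t_starts = torch.linspace(0.0, 0.7, 8, device="cuda").unsqueeze(-1)
t_ends = t_starts + 0.1
sigmas = torch.rand(8, 1, device="cuda", requires_grad=True)

weights, ray_indices, alive_ray_mask = volumetric_rendering_weights(
    packed_info, t_starts, t_ends, sigmas
)
# Gradients reach sigmas through the renamed autograd Function above.
weights.sum().backward()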
@@ -7,7 +7,11 @@ from .cuda import ( # ComputeWeight,; VolumeRenderer,; ray_aabb_intersect,
     ray_marching,
     volumetric_rendering_steps,
 )
-from .utils import ray_aabb_intersect, volumetric_accumulate, volumetric_weights
+from .utils import (
+    ray_aabb_intersect,
+    volumetric_accumulate,
+    volumetric_rendering_weights,
+)
 def volumetric_rendering(
@@ -94,7 +98,7 @@ def volumetric_rendering(
     compact_rgbs, compact_densities = compact_query_results[0], compact_query_results[1]
     # accumulation
-    compact_weights, compact_ray_indices, alive_ray_mask = volumetric_weights(
+    compact_weights, compact_ray_indices, alive_ray_mask = volumetric_rendering_weights(
         compact_packed_info,
         compact_frustum_starts,
         compact_frustum_ends,
...