Unverified Commit 7d831a2f authored by Vasilis Vryniotis's avatar Vasilis Vryniotis Committed by GitHub
Browse files

Restructuring C++ project: (#3146)

Summary:
* Reduce unnecessary header inclusions in models and io.

* Move autocast to separate folder and hide autograd implementation in an anonymous namespace.

* Moving files in subfolders.

Reviewed By: fmassa

Differential Revision: D25461523

fbshipit-source-id: 756eeb6848aacaa474de4825ed4c1045d17e2cea
parent 4d3a3093
......@@ -3,10 +3,6 @@
#include <torch/autograd.h>
#include <torch/types.h>
#if defined(WITH_CUDA) || defined(WITH_HIP)
#include <ATen/autocast_mode.h>
#endif
namespace vision {
namespace ops {
......@@ -34,32 +30,6 @@ at::Tensor roi_align(
aligned);
}
#if defined(WITH_CUDA) || defined(WITH_HIP)
// Autocast (mixed-precision) wrapper for roi_align: promotes both tensor
// arguments to fp32, runs the underlying op, and casts the result back to
// the input's original scalar type so callers see a dtype-preserving op.
at::Tensor roi_align_autocast(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t sampling_ratio,
    bool aligned) {
  // Exclude the Autocast key so the nested roi_align call dispatches to a
  // concrete backend kernel instead of re-entering this wrapper.
  c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
  const auto fp32_input = at::autocast::cached_cast(at::kFloat, input);
  const auto fp32_rois = at::autocast::cached_cast(at::kFloat, rois);
  auto pooled = roi_align(
      fp32_input,
      fp32_rois,
      spatial_scale,
      pooled_height,
      pooled_width,
      sampling_ratio,
      aligned);
  // Restore the caller-visible dtype (e.g. half when running under autocast).
  return pooled.to(input.scalar_type());
}
// Register the autocast kernel for the torchvision::roi_align operator;
// only compiled in when CUDA/HIP is available (guarded by the surrounding
// WITH_CUDA/WITH_HIP #if block).
TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
m.impl("roi_align", roi_align_autocast);
}
#endif
at::Tensor _roi_align_backward(
const at::Tensor& grad,
const at::Tensor& rois,
......@@ -198,8 +168,6 @@ class ROIAlignBackwardFunction
}
};
} // namespace
at::Tensor roi_align_autograd(
const at::Tensor& input,
const at::Tensor& rois,
......@@ -244,6 +212,8 @@ at::Tensor roi_align_backward_autograd(
aligned)[0];
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
m.impl("roi_align", roi_align_autograd);
m.impl("_roi_align_backward", roi_align_backward_autograd);
......
#pragma once
#include <ATen/ATen.h>
#include "macros.h"
#include "../macros.h"
namespace vision {
namespace ops {
......
......@@ -3,10 +3,6 @@
#include <torch/autograd.h>
#include <torch/types.h>
#if defined(WITH_CUDA) || defined(WITH_HIP)
#include <ATen/autocast_mode.h>
#endif
namespace vision {
namespace ops {
......@@ -22,31 +18,6 @@ std::tuple<at::Tensor, at::Tensor> roi_pool(
return op.call(input, rois, spatial_scale, pooled_height, pooled_width);
}
#if defined(WITH_CUDA) || defined(WITH_HIP)
// Autocast (mixed-precision) wrapper for roi_pool: promotes both tensor
// arguments to fp32, runs the underlying op, then casts both outputs back
// to the input's original scalar type.
std::tuple<at::Tensor, at::Tensor> roi_pool_autocast(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  // Exclude the Autocast key so the nested roi_pool call dispatches to a
  // concrete backend kernel instead of re-entering this wrapper.
  c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
  const auto fp32_input = at::autocast::cached_cast(at::kFloat, input);
  const auto fp32_rois = at::autocast::cached_cast(at::kFloat, rois);
  auto outputs = roi_pool(
      fp32_input,
      fp32_rois,
      spatial_scale,
      pooled_height,
      pooled_width);
  // Both elements are cast back to the caller-visible dtype, matching the
  // original behavior (the second element is converted as well).
  return {std::get<0>(outputs).to(input.scalar_type()),
          std::get<1>(outputs).to(input.scalar_type())};
}
// Register the autocast kernel for the torchvision::roi_pool operator;
// only compiled in when CUDA/HIP is available (guarded by the surrounding
// WITH_CUDA/WITH_HIP #if block).
TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
m.impl("roi_pool", roi_pool_autocast);
}
#endif
at::Tensor _roi_pool_backward(
const at::Tensor& grad,
const at::Tensor& rois,
......@@ -175,8 +146,6 @@ class ROIPoolBackwardFunction
}
};
} // namespace
std::tuple<at::Tensor, at::Tensor> roi_pool_autograd(
const at::Tensor& input,
const at::Tensor& rois,
......@@ -213,6 +182,8 @@ at::Tensor roi_pool_backward_autograd(
width)[0];
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
m.impl("roi_pool", roi_pool_autograd);
m.impl("_roi_pool_backward", roi_pool_backward_autograd);
......
#pragma once
#include <ATen/ATen.h>
#include "macros.h"
#include "../macros.h"
namespace vision {
namespace ops {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.