Unverified commit 7d831a2f authored by Vasilis Vryniotis, committed by GitHub

Restructuring C++ project: (#3146)

Summary:
* Reduce unnecessary header inclusions in models and io.

* Move autocast to separate folder and hide autograd implementation in an anonymous namespace.

* Moving files in subfolders.

Reviewed By: fmassa

Differential Revision: D25461523

fbshipit-source-id: 756eeb6848aacaa474de4825ed4c1045d17e2cea
parent 4d3a3093
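The hunks below remove the per-operator autocast wrappers from the main op files; per the summary they are relocated, not deleted. The restructuring leans on the PyTorch dispatcher: an operator's schema is declared once, while each dispatch key registers its kernel through an independent static registrar, so registrations can be spread across files and folders. A minimal, self-contained sketch of that pattern (the library and function names are placeholders, not the actual torchvision sources):

#include <torch/library.h>
#include <torch/types.h>

namespace {

// Stand-in kernel with internal linkage; a real op does actual work.
at::Tensor my_op_cpu(const at::Tensor& input) {
  return input * 2;
}

} // namespace

// The schema is declared exactly once for the whole library...
TORCH_LIBRARY_FRAGMENT(torchvision_demo, m) {
  m.def("my_op(Tensor input) -> Tensor");
}

// ...but each dispatch key's registration is a standalone static registrar,
// so it can live in any translation unit (e.g. one file per backend under
// cpu/, cuda/, autocast/, or autograd/ subfolders).
TORCH_LIBRARY_IMPL(torchvision_demo, CPU, m) {
  m.impl("my_op", my_op_cpu);
}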
@@ -3,10 +3,6 @@
#include <torch/autograd.h>
#include <torch/types.h>
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-#include <ATen/autocast_mode.h>
-#endif
namespace vision {
namespace ops {
@@ -45,46 +41,6 @@ at::Tensor deform_conv2d(
use_mask);
}
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-at::Tensor deform_conv2d_autocast(
-const at::Tensor& input,
-const at::Tensor& weight,
-const at::Tensor& offset,
-const at::Tensor& mask,
-const at::Tensor& bias,
-int64_t stride_h,
-int64_t stride_w,
-int64_t pad_h,
-int64_t pad_w,
-int64_t dilation_h,
-int64_t dilation_w,
-int64_t groups,
-int64_t offset_groups,
-bool use_mask) {
-c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
-return deform_conv2d(
-at::autocast::cached_cast(at::kFloat, input),
-at::autocast::cached_cast(at::kFloat, weight),
-at::autocast::cached_cast(at::kFloat, offset),
-at::autocast::cached_cast(at::kFloat, mask),
-at::autocast::cached_cast(at::kFloat, bias),
-stride_h,
-stride_w,
-pad_h,
-pad_w,
-dilation_h,
-dilation_w,
-groups,
-offset_groups,
-use_mask)
-.to(input.scalar_type());
-}
-TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
-m.impl("deform_conv2d", deform_conv2d_autocast);
-}
-#endif
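Per the summary, the block removed above moves into a dedicated autocast folder rather than disappearing. A hedged sketch of the relocated file, reusing the removed wrapper verbatim; the file path, the include, and the anonymous-namespace placement are assumptions based on the summary bullets, not shown in this diff:

// Plausible new home, e.g. an autocast/ subfolder next to the op file
// (the exact path is an assumption).
#include <ATen/autocast_mode.h>
#include <torch/library.h>
#include <torch/types.h>

#include "../deform_conv2d.h" // assumed: declares vision::ops::deform_conv2d

namespace vision {
namespace ops {
namespace {

at::Tensor deform_conv2d_autocast(
    const at::Tensor& input,
    const at::Tensor& weight,
    const at::Tensor& offset,
    const at::Tensor& mask,
    const at::Tensor& bias,
    int64_t stride_h,
    int64_t stride_w,
    int64_t pad_h,
    int64_t pad_w,
    int64_t dilation_h,
    int64_t dilation_w,
    int64_t groups,
    int64_t offset_groups,
    bool use_mask) {
  // Keep the inner call from re-entering autocast, compute in float32,
  // then cast the result back to the caller's dtype.
  c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
  return deform_conv2d(
             at::autocast::cached_cast(at::kFloat, input),
             at::autocast::cached_cast(at::kFloat, weight),
             at::autocast::cached_cast(at::kFloat, offset),
             at::autocast::cached_cast(at::kFloat, mask),
             at::autocast::cached_cast(at::kFloat, bias),
             stride_h,
             stride_w,
             pad_h,
             pad_w,
             dilation_h,
             dilation_w,
             groups,
             offset_groups,
             use_mask)
      .to(input.scalar_type());
}

} // namespace

TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
  m.impl("deform_conv2d", deform_conv2d_autocast);
}

} // namespace ops
} // namespace vision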
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor>
_deform_conv2d_backward(
const at::Tensor& grad,
@@ -307,8 +263,6 @@ class DeformConv2dBackwardFunction
}
};
-} // namespace
at::Tensor deform_conv2d_autograd(
const at::Tensor& input,
const at::Tensor& weight,
@@ -378,6 +332,8 @@ deform_conv2d_backward_autograd(
return std::make_tuple(result[0], result[1], result[2], result[3], result[4]);
}
+} // namespace
TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
m.impl("deform_conv2d", deform_conv2d_autograd);
m.impl("_deform_conv2d_backward", deform_conv2d_backward_autograd);
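The two `} // namespace` hunks above implement the second summary bullet: the anonymous namespace that previously closed after the backward Function classes now also encloses deform_conv2d_autograd and deform_conv2d_backward_autograd, giving them internal linkage so that only the registration block refers to them. The same shape in miniature (placeholder names, reusing the my_op schema from the first sketch):

#include <torch/library.h>
#include <torch/types.h>

namespace {

// Internal linkage: the symbol is invisible outside this translation
// unit, so it cannot collide with other files and is not public API.
at::Tensor my_op_autograd(const at::Tensor& input) {
  return input; // a real kernel would route through an autograd::Function
}

} // namespace

// The registrar is the only consumer of the hidden function.
TORCH_LIBRARY_IMPL(torchvision_demo, Autograd, m) {
  m.impl("my_op", my_op_autograd);
}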
...
#pragma once
#include <ATen/ATen.h>
-#include "macros.h"
+#include "../macros.h"
namespace vision {
namespace ops {
...
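This header hunk, like the identical ones further down, is the mechanical consequence of the third summary bullet: once a header moves one directory deeper, its quote-style include of a sibling header has to climb back up one level. An illustrative before/after layout (the concrete paths are assumptions, not shown in this diff):

// Before the move: the header sits next to macros.h.
//   csrc/some_op.h      ->  #include "macros.h"
// After the move: the header lives one level deeper.
//   csrc/ops/some_op.h  ->  #include "../macros.h"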
#pragma once
#include <ATen/ATen.h>
-#include "macros.h"
+#include "../macros.h"
namespace vision {
namespace ops {
...
@@ -3,10 +3,6 @@
#include <torch/autograd.h>
#include <torch/types.h>
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-#include <ATen/autocast_mode.h>
-#endif
namespace vision {
namespace ops {
@@ -20,23 +16,6 @@ at::Tensor nms(
return op.call(dets, scores, iou_threshold);
}
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-at::Tensor nms_autocast(
-const at::Tensor& dets,
-const at::Tensor& scores,
-double iou_threshold) {
-c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
-return nms(
-at::autocast::cached_cast(at::kFloat, dets),
-at::autocast::cached_cast(at::kFloat, scores),
-iou_threshold);
-}
-TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
-m.impl("nms", nms_autocast);
-}
-#endif
TORCH_LIBRARY_FRAGMENT(torchvision, m) {
m.def("nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor");
}
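Unlike the pooling wrappers below, the removed nms_autocast casts dets and scores to float32 but never casts its result back: nms returns int64 indices into dets, so there is no floating dtype to restore. A hedged usage sketch that calls the op through the dispatcher, mirroring the op.call pattern in the wrapper above (assumes the torchvision ops library is already loaded into the process):

#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/torch.h>

int main() {
  // Two overlapping boxes in (x1, y1, x2, y2) form, with scores.
  auto dets = torch::tensor({{0.0, 0.0, 10.0, 10.0},
                             {1.0, 1.0, 11.0, 11.0}});
  auto scores = torch::tensor({0.9, 0.8});

  // Look up the schema registered via TORCH_LIBRARY_FRAGMENT above.
  auto op = c10::Dispatcher::singleton()
                .findSchemaOrThrow("torchvision::nms", "")
                .typed<at::Tensor(
                    const at::Tensor&, const at::Tensor&, double)>();

  at::Tensor keep = op.call(dets, scores, /*iou_threshold=*/0.5);
  // `keep` holds int64 indices, which is why the autocast wrapper
  // needed no cast back to the input dtype.
}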
...
#pragma once
#include <ATen/ATen.h>
-#include "macros.h"
+#include "../macros.h"
namespace vision {
namespace ops {
...
@@ -3,10 +3,6 @@
#include <torch/autograd.h>
#include <torch/types.h>
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-#include <ATen/autocast_mode.h>
-#endif
namespace vision {
namespace ops {
@@ -24,33 +20,6 @@ std::tuple<at::Tensor, at::Tensor> ps_roi_align(
input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
}
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-std::tuple<at::Tensor, at::Tensor> ps_roi_align_autocast(
-const at::Tensor& input,
-const at::Tensor& rois,
-double spatial_scale,
-int64_t pooled_height,
-int64_t pooled_width,
-int64_t sampling_ratio) {
-c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
-auto result = ps_roi_align(
-at::autocast::cached_cast(at::kFloat, input),
-at::autocast::cached_cast(at::kFloat, rois),
-spatial_scale,
-pooled_height,
-pooled_width,
-sampling_ratio);
-return std::make_tuple(
-std::get<0>(result).to(input.scalar_type()),
-std::get<1>(result).to(input.scalar_type()));
-}
-TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
-m.impl("ps_roi_align", ps_roi_align_autocast);
-}
-#endif
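For operators that return more than one tensor, the wrapper restores the caller's dtype element by element, as the block removed above shows; the ps_roi_pool hunk below follows the same pattern. That pattern in isolation (a condensed sketch with placeholder names):

#include <ATen/ATen.h>
#include <tuple>

// Cast each output of a float32 computation back to `input`'s dtype.
std::tuple<at::Tensor, at::Tensor> restore_dtype(
    const at::Tensor& input,
    const std::tuple<at::Tensor, at::Tensor>& result) {
  return std::make_tuple(
      std::get<0>(result).to(input.scalar_type()),
      std::get<1>(result).to(input.scalar_type()));
}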
at::Tensor _ps_roi_align_backward(
const at::Tensor& grad,
const at::Tensor& rois,
@@ -194,8 +163,6 @@ class PSROIAlignBackwardFunction
}
};
-} // namespace
std::tuple<at::Tensor, at::Tensor> ps_roi_align_autograd(
const at::Tensor& input,
const at::Tensor& rois,
@@ -235,6 +202,8 @@ at::Tensor ps_roi_align_backward_autograd(
width)[0];
}
+} // namespace
TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
m.impl("ps_roi_align", ps_roi_align_autograd);
m.impl("_ps_roi_align_backward", ps_roi_align_backward_autograd);
...
#pragma once
#include <ATen/ATen.h>
-#include "macros.h"
+#include "../macros.h"
namespace vision {
namespace ops {
...
@@ -3,10 +3,6 @@
#include <torch/autograd.h>
#include <torch/types.h>
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-#include <ATen/autocast_mode.h>
-#endif
namespace vision {
namespace ops {
@@ -22,31 +18,6 @@ std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
return op.call(input, rois, spatial_scale, pooled_height, pooled_width);
}
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-std::tuple<at::Tensor, at::Tensor> ps_roi_pool_autocast(
-const at::Tensor& input,
-const at::Tensor& rois,
-double spatial_scale,
-int64_t pooled_height,
-int64_t pooled_width) {
-c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
-auto result = ps_roi_pool(
-at::autocast::cached_cast(at::kFloat, input),
-at::autocast::cached_cast(at::kFloat, rois),
-spatial_scale,
-pooled_height,
-pooled_width);
-return std::make_tuple(
-std::get<0>(result).to(input.scalar_type()),
-std::get<1>(result).to(input.scalar_type()));
-}
-TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
-m.impl("ps_roi_pool", ps_roi_pool_autocast);
-}
-#endif
at::Tensor _ps_roi_pool_backward(
const at::Tensor& grad,
const at::Tensor& rois,
@@ -176,8 +147,6 @@ class PSROIPoolBackwardFunction
}
};
-} // namespace
std::tuple<at::Tensor, at::Tensor> ps_roi_pool_autograd(
const at::Tensor& input,
const at::Tensor& rois,
@@ -214,6 +183,8 @@ at::Tensor ps_roi_pool_backward_autograd(
width)[0];
}
+} // namespace
TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
m.impl("ps_roi_pool", ps_roi_pool_autograd);
m.impl("_ps_roi_pool_backward", ps_roi_pool_backward_autograd);
...
#pragma once
#include <ATen/ATen.h>
-#include "macros.h"
+#include "../macros.h"
namespace vision {
namespace ops {
...