Unverified commit e89c4c01, authored by Shawn Zhong, committed by GitHub

Avoid `using` in header files (#2257)

* Avoid `using` in header files

* Fix clang_format

* use clang-format-7 to reformat code
parent 3d65fc67
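
The motivation behind this change: a `using` directive or declaration at namespace scope in a header is injected into every translation unit that includes that header, so includers can hit name collisions or surprising overload resolution they never opted into. A minimal sketch of the failure mode (the header name, `twice`, and the consumer code below are hypothetical, not from this PR):

```cpp
// leaky.h -- hypothetical header illustrating the problem.
#pragma once
#include <ATen/ATen.h>

using namespace at; // injected into every translation unit that includes leaky.h

inline Tensor twice(const Tensor& t) { // happens to resolve to at::Tensor
  return t + t;
}

// consumer.cpp -- any includer inherits the directive:
// #include "leaky.h"
// struct Tensor {};  // a local type with the same name
// Tensor t;          // error: reference to 'Tensor' is ambiguous (::Tensor vs at::Tensor)
```

The fix adopted throughout the hunks below is the conservative one: drop the `using` lines from the headers and spell out `at::` and `torch::autograd::` at each use site.
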
```diff
@@ -88,21 +88,15 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward
       offset_groups);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class DeformConv2dFunction
     : public torch::autograd::Function<DeformConv2dFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable weight,
-      Variable offset,
-      Variable bias,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable weight,
+      torch::autograd::Variable offset,
+      torch::autograd::Variable bias,
       int64_t stride_h,
       int64_t stride_w,
       int64_t pad_h,
@@ -137,9 +131,9 @@ class DeformConv2dFunction
     };
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     auto saved = ctx->get_saved_variables();
     auto input = saved[0];
     auto weight = saved[1];
@@ -176,14 +170,14 @@ class DeformConv2dFunction
         grad_weight,
         grad_offset,
         grad_bias,
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
     };
   }
 };
...
```
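
For readers unfamiliar with the pattern being edited: subclasses of `torch::autograd::Function` implement static `forward`/`backward`, and `backward` must return one entry per `forward` argument (after `ctx`), with default-constructed `Variable()`s standing in for non-differentiable arguments — which is why the hunk above ends in a run of `torch::autograd::Variable()`. A minimal self-contained sketch in the fully qualified style this commit adopts (`ScaleFunction` and the op itself are hypothetical):

```cpp
#include <torch/torch.h>

// Hypothetical op: y = input * scale, where scale is a non-tensor argument.
class ScaleFunction : public torch::autograd::Function<ScaleFunction> {
 public:
  static torch::autograd::variable_list forward(
      torch::autograd::AutogradContext* ctx,
      torch::autograd::Variable input,
      double scale) {
    ctx->saved_data["scale"] = scale; // stash non-tensor state for backward
    return {input * scale};
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      torch::autograd::variable_list grad_output) {
    double scale = ctx->saved_data["scale"].toDouble();
    // One slot per forward arg: a gradient for `input`,
    // an undefined Variable for the non-differentiable `scale`.
    return {grad_output[0] * scale, torch::autograd::Variable()};
  }
};

// Usage: auto y = ScaleFunction::apply(x, 3.0)[0];
```
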
```diff
@@ -79,19 +79,13 @@ at::Tensor PSROIAlign_backward(
       width);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class PSROIAlignFunction
     : public torch::autograd::Function<PSROIAlignFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width,
@@ -115,9 +109,9 @@ class PSROIAlignFunction
     return {output, channel_mapping};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -135,19 +129,23 @@ class PSROIAlignFunction
         input_shape[1],
         input_shape[2],
         input_shape[3]);
-    return {
-        grad_in, Variable(), Variable(), Variable(), Variable(), Variable()};
+    return {grad_in,
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-std::tuple<Tensor, Tensor> ps_roi_align(
-    const Tensor& input,
-    const Tensor& rois,
+std::tuple<at::Tensor, at::Tensor> ps_roi_align(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width,
     const int64_t sampling_ratio) {
   auto result = PSROIAlignFunction::apply(
       input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
-  return std::tuple<Tensor, Tensor>(result[0], result[1]);
+  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
 }
```
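
A usage sketch for the `ps_roi_align` wrapper above (the tensor values are made up; the `(K, 5)` RoI layout of `[batch_index, x1, y1, x2, y2]` follows the torchvision convention). Position-sensitive RoI align expects the input channel count to equal `output_channels * pooled_height * pooled_width`:

```cpp
#include <torch/torch.h>
#include <tuple>

void ps_roi_align_demo() {
  // 4 input channels = 1 output channel * pooled 2x2.
  at::Tensor input = torch::rand({1, 4, 16, 16});
  // One RoI: [batch_index, x1, y1, x2, y2], float to match the feature map.
  at::Tensor rois = torch::tensor({0.0f, 0.0f, 0.0f, 8.0f, 8.0f}).unsqueeze(0);

  at::Tensor out, mapping;
  std::tie(out, mapping) = ps_roi_align(
      input, rois, /*spatial_scale=*/1.0,
      /*pooled_height=*/2, /*pooled_width=*/2, /*sampling_ratio=*/2);
  // out: (1, 1, 2, 2); mapping records which input channel fed each output bin.
}
```
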
```diff
@@ -68,18 +68,12 @@ at::Tensor PSROIPool_backward(
       width);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width) {
@@ -96,9 +90,9 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
     return {output, channel_mapping};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -115,17 +109,21 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
         input_shape[1],
         input_shape[2],
         input_shape[3]);
-    return {grad_in, Variable(), Variable(), Variable(), Variable()};
+    return {grad_in,
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-std::tuple<Tensor, Tensor> ps_roi_pool(
-    const Tensor& input,
-    const Tensor& rois,
+std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width) {
   auto result = PSROIPoolFunction::apply(
       input, rois, spatial_scale, pooled_height, pooled_width);
-  return std::tuple<Tensor, Tensor>(result[0], result[1]);
+  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
 }
```
```diff
@@ -89,18 +89,12 @@ at::Tensor ROIAlign_backward(
       aligned);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width,
@@ -124,9 +118,9 @@ class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
     return {result};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -144,18 +138,18 @@ class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
         ctx->saved_data["sampling_ratio"].toInt(),
         ctx->saved_data["aligned"].toBool());
     return {grad_in,
-            Variable(),
-            Variable(),
-            Variable(),
-            Variable(),
-            Variable(),
-            Variable()};
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-Tensor roi_align(
-    const Tensor& input,
-    const Tensor& rois,
+at::Tensor roi_align(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width,
...
```
```diff
@@ -68,18 +68,12 @@ at::Tensor ROIPool_backward(
       width);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width) {
@@ -96,9 +90,9 @@ class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
     return {output, argmax};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -115,17 +109,21 @@ class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
         input_shape[1],
         input_shape[2],
         input_shape[3]);
-    return {grad_in, Variable(), Variable(), Variable(), Variable()};
+    return {grad_in,
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-std::tuple<Tensor, Tensor> roi_pool(
-    const Tensor& input,
-    const Tensor& rois,
+std::tuple<at::Tensor, at::Tensor> roi_pool(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width) {
   auto result = ROIPoolFunction::apply(
       input, rois, spatial_scale, pooled_height, pooled_width);
-  return std::tuple<Tensor, Tensor>(result[0], result[1]);
+  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
 }
```
```diff
@@ -5,26 +5,20 @@
 // Python bindings for the C++ frontend (includes Python.h).
 #include <torch/python.h>
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class NewEmptyTensorOp : public torch::autograd::Function<NewEmptyTensorOp> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
       c10::List<int64_t> new_shape) {
     ctx->saved_data["shape"] = input.sizes();
     std::vector<int64_t> shape(new_shape.begin(), new_shape.end());
-    return {input.new_empty(shape, TensorOptions())};
+    return {input.new_empty(shape, at::TensorOptions())};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto shape = ctx->saved_data["shape"].toIntList();
     auto out = forward(ctx, grad_output[0], shape);
@@ -32,6 +26,6 @@ class NewEmptyTensorOp : public torch::autograd::Function<NewEmptyTensorOp> {
   }
 };
 
-Tensor new_empty_tensor(const Tensor& input, c10::List<int64_t> shape) {
+at::Tensor new_empty_tensor(const at::Tensor& input, c10::List<int64_t> shape) {
   return NewEmptyTensorOp::apply(input, shape)[0];
 }
```
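
A brief usage sketch for `new_empty_tensor` as declared above (shapes and values are arbitrary): it allocates an uninitialized tensor with the requested shape but the dtype and device of `input`, and the custom backward routes gradients back through the original shape.

```cpp
#include <torch/torch.h>

void new_empty_tensor_demo() {
  at::Tensor src = torch::rand({2, 3});
  c10::List<int64_t> shape({4, 5});
  at::Tensor out = new_empty_tensor(src, shape);
  // out is (4, 5), same dtype/device as src; its contents are uninitialized.
}
```
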