Unverified commit e89c4c01, authored by Shawn Zhong and committed via GitHub
Browse files

Avoid `using` in header files (#2257)

* Avoid `using` in header files

* Fix clang_format

* use clang-format-7 to reformat code
parent 3d65fc67
......@@ -88,21 +88,15 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward
offset_groups);
}
using namespace at;
using torch::Tensor;
using torch::autograd::AutogradContext;
using torch::autograd::Variable;
using torch::autograd::variable_list;
class DeformConv2dFunction
: public torch::autograd::Function<DeformConv2dFunction> {
public:
static variable_list forward(
AutogradContext* ctx,
Variable input,
Variable weight,
Variable offset,
Variable bias,
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
torch::autograd::Variable weight,
torch::autograd::Variable offset,
torch::autograd::Variable bias,
int64_t stride_h,
int64_t stride_w,
int64_t pad_h,
......@@ -137,9 +131,9 @@ class DeformConv2dFunction
};
}
static variable_list backward(
AutogradContext* ctx,
variable_list grad_output) {
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
auto saved = ctx->get_saved_variables();
auto input = saved[0];
auto weight = saved[1];
......@@ -176,14 +170,14 @@ class DeformConv2dFunction
grad_weight,
grad_offset,
grad_bias,
Variable(),
Variable(),
Variable(),
Variable(),
Variable(),
Variable(),
Variable(),
Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
};
}
};
......
......@@ -79,19 +79,13 @@ at::Tensor PSROIAlign_backward(
width);
}
using namespace at;
using torch::Tensor;
using torch::autograd::AutogradContext;
using torch::autograd::Variable;
using torch::autograd::variable_list;
class PSROIAlignFunction
: public torch::autograd::Function<PSROIAlignFunction> {
public:
static variable_list forward(
AutogradContext* ctx,
Variable input,
Variable rois,
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
torch::autograd::Variable rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width,
......@@ -115,9 +109,9 @@ class PSROIAlignFunction
return {output, channel_mapping};
}
static variable_list backward(
AutogradContext* ctx,
variable_list grad_output) {
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
// Use data saved in forward
auto saved = ctx->get_saved_variables();
auto rois = saved[0];
......@@ -135,19 +129,23 @@ class PSROIAlignFunction
input_shape[1],
input_shape[2],
input_shape[3]);
return {
grad_in, Variable(), Variable(), Variable(), Variable(), Variable()};
return {grad_in,
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable()};
}
};
// Position-Sensitive ROI Align over the boxes in `rois` on feature map
// `input`. Thin autograd-enabled wrapper: dispatches to
// PSROIAlignFunction::apply and repacks its variable_list result.
//
// Returns {output, channel_mapping} as a std::tuple of at::Tensor.
// NOTE: types are written fully qualified (at::Tensor) on purpose — this
// file avoids `using` declarations (see #2257).
std::tuple<at::Tensor, at::Tensor> ps_roi_align(
    const at::Tensor& input,
    const at::Tensor& rois,
    const double spatial_scale,
    const int64_t pooled_height,
    const int64_t pooled_width,
    const int64_t sampling_ratio) {
  auto result = PSROIAlignFunction::apply(
      input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
}
......@@ -68,18 +68,12 @@ at::Tensor PSROIPool_backward(
width);
}
using namespace at;
using torch::Tensor;
using torch::autograd::AutogradContext;
using torch::autograd::Variable;
using torch::autograd::variable_list;
class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
public:
static variable_list forward(
AutogradContext* ctx,
Variable input,
Variable rois,
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
torch::autograd::Variable rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width) {
......@@ -96,9 +90,9 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
return {output, channel_mapping};
}
static variable_list backward(
AutogradContext* ctx,
variable_list grad_output) {
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
// Use data saved in forward
auto saved = ctx->get_saved_variables();
auto rois = saved[0];
......@@ -115,17 +109,21 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
input_shape[1],
input_shape[2],
input_shape[3]);
return {grad_in, Variable(), Variable(), Variable(), Variable()};
return {grad_in,
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable()};
}
};
// Position-Sensitive ROI Pooling over the boxes in `rois` on feature map
// `input`. Thin autograd-enabled wrapper: dispatches to
// PSROIPoolFunction::apply and repacks its variable_list result.
//
// Returns {output, channel_mapping} as a std::tuple of at::Tensor.
// NOTE: types are written fully qualified (at::Tensor) on purpose — this
// file avoids `using` declarations (see #2257).
std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
    const at::Tensor& input,
    const at::Tensor& rois,
    const double spatial_scale,
    const int64_t pooled_height,
    const int64_t pooled_width) {
  auto result = PSROIPoolFunction::apply(
      input, rois, spatial_scale, pooled_height, pooled_width);
  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
}
......@@ -89,18 +89,12 @@ at::Tensor ROIAlign_backward(
aligned);
}
using namespace at;
using torch::Tensor;
using torch::autograd::AutogradContext;
using torch::autograd::Variable;
using torch::autograd::variable_list;
class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
public:
static variable_list forward(
AutogradContext* ctx,
Variable input,
Variable rois,
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
torch::autograd::Variable rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width,
......@@ -124,9 +118,9 @@ class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
return {result};
}
static variable_list backward(
AutogradContext* ctx,
variable_list grad_output) {
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
// Use data saved in forward
auto saved = ctx->get_saved_variables();
auto rois = saved[0];
......@@ -144,18 +138,18 @@ class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
ctx->saved_data["sampling_ratio"].toInt(),
ctx->saved_data["aligned"].toBool());
return {grad_in,
Variable(),
Variable(),
Variable(),
Variable(),
Variable(),
Variable()};
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable()};
}
};
Tensor roi_align(
const Tensor& input,
const Tensor& rois,
at::Tensor roi_align(
const at::Tensor& input,
const at::Tensor& rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width,
......
......@@ -68,18 +68,12 @@ at::Tensor ROIPool_backward(
width);
}
using namespace at;
using torch::Tensor;
using torch::autograd::AutogradContext;
using torch::autograd::Variable;
using torch::autograd::variable_list;
class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
public:
static variable_list forward(
AutogradContext* ctx,
Variable input,
Variable rois,
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
torch::autograd::Variable rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width) {
......@@ -96,9 +90,9 @@ class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
return {output, argmax};
}
static variable_list backward(
AutogradContext* ctx,
variable_list grad_output) {
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
// Use data saved in forward
auto saved = ctx->get_saved_variables();
auto rois = saved[0];
......@@ -115,17 +109,21 @@ class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
input_shape[1],
input_shape[2],
input_shape[3]);
return {grad_in, Variable(), Variable(), Variable(), Variable()};
return {grad_in,
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable(),
torch::autograd::Variable()};
}
};
// ROI Max-Pooling over the boxes in `rois` on feature map `input`.
// Thin autograd-enabled wrapper: dispatches to ROIPoolFunction::apply and
// repacks its variable_list result.
//
// Returns {output, argmax} as a std::tuple of at::Tensor (argmax is the
// index map saved for the backward pass).
// NOTE: types are written fully qualified (at::Tensor) on purpose — this
// file avoids `using` declarations (see #2257).
std::tuple<at::Tensor, at::Tensor> roi_pool(
    const at::Tensor& input,
    const at::Tensor& rois,
    const double spatial_scale,
    const int64_t pooled_height,
    const int64_t pooled_width) {
  auto result = ROIPoolFunction::apply(
      input, rois, spatial_scale, pooled_height, pooled_width);
  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
}
......@@ -5,26 +5,20 @@
// Python bindings for the C++ frontend (includes Python.h).
#include <torch/python.h>
using namespace at;
using torch::Tensor;
using torch::autograd::AutogradContext;
using torch::autograd::Variable;
using torch::autograd::variable_list;
class NewEmptyTensorOp : public torch::autograd::Function<NewEmptyTensorOp> {
public:
static variable_list forward(
AutogradContext* ctx,
Variable input,
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
c10::List<int64_t> new_shape) {
ctx->saved_data["shape"] = input.sizes();
std::vector<int64_t> shape(new_shape.begin(), new_shape.end());
return {input.new_empty(shape, TensorOptions())};
return {input.new_empty(shape, at::TensorOptions())};
}
static variable_list backward(
AutogradContext* ctx,
variable_list grad_output) {
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
// Use data saved in forward
auto shape = ctx->saved_data["shape"].toIntList();
auto out = forward(ctx, grad_output[0], shape);
......@@ -32,6 +26,6 @@ class NewEmptyTensorOp : public torch::autograd::Function<NewEmptyTensorOp> {
}
};
Tensor new_empty_tensor(const Tensor& input, c10::List<int64_t> shape) {
at::Tensor new_empty_tensor(const at::Tensor& input, c10::List<int64_t> shape) {
return NewEmptyTensorOp::apply(input, shape)[0];
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment