Unverified commit d9549fba authored by zhuyuanhao, committed by GitHub

fix cpp header error (#371)

* 1. use the USE_PARROTS macro to control which helper header is included
2. add clang-format with the Google style to the pre-commit hooks

* use MMCV_-prefixed macros (MMCV_USE_PARROTS, MMCV_WITH_CUDA)
parent 2c6fc5fd
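For reference, the pattern this commit rolls out across the kernel headers is an include guard plus a compile-time switch between the two backend helpers. A minimal sketch (the guard name EXAMPLE_CUDA_KERNEL_CUH is illustrative):

#ifndef EXAMPLE_CUDA_KERNEL_CUH
#define EXAMPLE_CUDA_KERNEL_CUH

// Parrots builds define MMCV_USE_PARROTS; PyTorch builds take the else branch.
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif

// ... kernel definitions ...

#endif  // EXAMPLE_CUDA_KERNEL_CUH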
@@ -33,6 +33,6 @@ repos:
- id: clang-format
name: clang-format
description: Format files with ClangFormat
entry: clang-format -i
entry: clang-format -style=google -i
language: system
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$
#ifndef BBOX_OVERLAPS_CUDA_KERNEL_CUH
#define BBOX_OVERLAPS_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
template <typename T>
__global__ void bbox_overlaps_cuda_kernel(const T* bbox1, const T* bbox2,
T* ious, const int num_bbox1,
@@ -73,4 +79,5 @@ __global__ void bbox_overlaps_cuda_kernel(const T* bbox1, const T* bbox2,
}
}
}
#endif
#endif // BBOX_OVERLAPS_CUDA_KERNEL_CUH
#ifndef CARAFE_CUDA_KERNEL_CUH
#define CARAFE_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
#define WARP_SIZE 32
#define THREADS_PER_PIXEL 32
#define MAX_SHARED_MEMORY 49152
@@ -301,3 +310,5 @@ __global__ void CARAFEBackward_Mask(const int num_kernels,
mask_diff[mask_id] = output_val;
}
}
#endif // CARAFE_CUDA_KERNEL_CUH
#ifndef CARAFE_NAIVE_CUDA_KERNEL_CUH
#define CARAFE_NAIVE_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
__device__ inline int Loc2Index(const int n, const int c, const int h,
const int w, const int channel_num,
const int height, const int width) {
@@ -101,4 +107,4 @@ __global__ void carafe_naive_backward_cuda_kernel(
}
}
#endif
#endif // CARAFE_NAIVE_CUDA_KERNEL_CUH
#ifndef CA_CUDA_KERNEL_CUH
#define CA_CUDA_KERNEL_CUH
#ifndef CC_ATTENTION_CUDA_KERNEL_CUH
#define CC_ATTENTION_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
template <typename T>
__global__ void ca_forward_kernel(const T *t, const T *f, T *weight, int num,
@@ -176,4 +182,4 @@ __global__ void ca_map_backward_kernel_g(const T *dout, const T *weight,
}
}
#endif
#endif // CC_ATTENTION_CUDA_KERNEL_CUH
@@ -63,8 +63,14 @@
// modified from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#ifndef DEFORM_CONV_KERNEL_CUH
#define DEFORM_CONV_KERNEL_CUH
#ifndef DEFORM_CONV_CUDA_KERNEL_CUH
#define DEFORM_CONV_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
template <typename T>
__device__ T deformable_im2col_bilinear(const T *input, const int data_width,
@@ -353,4 +359,4 @@ __global__ void deformable_col2im_coord_gpu_kernel(
}
}
#endif
#endif // DEFORM_CONV_CUDA_KERNEL_CUH
#ifndef DEFORM_POOL_KERNEL_CUH
#define DEFORM_POOL_KERNEL_CUH
#ifndef DEFORM_ROI_POOL_CUDA_KERNEL_CUH
#define DEFORM_ROI_POOL_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
template <typename T>
__global__ void deform_roi_pool_forward_cuda_kernel(
@@ -174,4 +180,4 @@ __global__ void deform_roi_pool_backward_cuda_kernel(
}
}
#endif
#endif // DEFORM_ROI_POOL_CUDA_KERNEL_CUH
#ifndef MASKED_CONV2D_CUDA_KERNEL_CUH
#define MASKED_CONV2D_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
template <typename scalar_t>
__global__ void MaskedIm2colForward(const int n, const scalar_t *data_im,
const int height, const int width,
@@ -48,3 +57,5 @@ __global__ void MaskedCol2imForward(const int n, const scalar_t *data_col,
data_im[(c_im * height + h_im) * width + w_im] = data_col[index];
}
}
#endif // MASKED_CONV2D_CUDA_KERNEL_CUH
@@ -63,8 +63,14 @@
// modified from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#ifndef MODULATED_DEFORM_CONV_KERNEL_CUH
#define MODULATED_DEFORM_CONV_KERNEL_CUH
#ifndef MODULATED_DEFORM_CONV_CUDA_KERNEL_CUH
#define MODULATED_DEFORM_CONV_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
template <typename T>
__device__ T dmcn_im2col_bilinear(const T *input, const int data_width,
@@ -385,4 +391,4 @@ __global__ void modulated_deformable_col2im_coord_gpu_kernel(
}
}
#endif
#endif // MODULATED_DEFORM_CONV_CUDA_KERNEL_CUH
#ifndef NMS_KERNEL_CUH
#define NMS_KERNEL_CUH
#ifndef NMS_CUDA_KERNEL_CUH
#define NMS_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long int) * 8;
@@ -60,4 +66,4 @@ __global__ void nms_cuda(const int n_boxes, const float iou_threshold,
dev_mask[cur_box_idx * gridDim.y + col_start] = t;
}
}
#endif
#endif // NMS_CUDA_KERNEL_CUH
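For context on the two definitions just above: DIVUP is ceiling division, and threadsPerBlock is 64 (the bit width of unsigned long long int), so each block covers one 64-box tile of the pairwise IoU mask. A sketch of how a launcher might size the grid with DIVUP (boxes_dev and mask_dev are illustrative names, not the actual launcher code):

// One tile per 64 boxes; DIVUP rounds up so leftover boxes get a block too.
int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dim3 blocks(col_blocks, col_blocks);  // one block per (row tile, column tile)
dim3 threads(threadsPerBlock);
nms_cuda<<<blocks, threads>>>(n_boxes, iou_threshold, boxes_dev, mask_dev);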
@@ -406,7 +406,7 @@ void DeformConvBackwardInputCUDAKernelLauncher(
}
}
int DeformConvBackwardParametersCUDAKernelLauncher(
void DeformConvBackwardParametersCUDAKernelLauncher(
DArrayLite input, DArrayLite offset, DArrayLite gradOutput,
DArrayLite gradWeight, DArrayLite columns, DArrayLite ones, int kW, int kH,
int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group,
...
#include "parrots_cuda_helper.hpp"
#include "sigmoid_focal_loss_kernel.cuh"
#include "softmax_focal_loss_kernel.cuh"
#include "sigmoid_focal_loss_cuda_kernel.cuh"
#include "softmax_focal_loss_cuda_kernel.cuh"
void SigmoidFocalLossForwardCUDAKernelLauncher(
const DArrayLite input, const DArrayLite target, const DArrayLite weight,
...
#include "nms_kernel.cuh"
#include "nms_cuda_kernel.cuh"
#include "parrots_cuda_helper.hpp"
DArrayLite NMSCUDAKernelLauncher(const DArrayLite boxes_sorted,
...
#include "parrots_cuda_helper.hpp"
#include "roi_align_kernel.cuh"
#include "roi_align_cuda_kernel.cuh"
void ROIAlignForwardCUDAKernelLauncher(const DArrayLite input,
const DArrayLite rois, DArrayLite output,
...
#include "parrots_cuda_helper.hpp"
#include "roi_pool_kernel.cuh"
#include "roi_pool_cuda_kernel.cuh"
void ROIPoolForwardCUDAKernelLauncher(const DArrayLite input,
const DArrayLite rois, DArrayLite output,
...
#ifndef PSAMASK_CUDA_CUH
#define PSAMASK_CUDA_CUH
#ifndef PSAMASK_CUDA_KERNEL_CUH
#define PSAMASK_CUDA_KERNEL_CUH
#ifdef MMCV_USE_PARROTS
#include "parrots_cuda_helper.hpp"
#else
#include "pytorch_cuda_helper.hpp"
#endif
// CUDA: grid stride looping
#ifndef CUDA_KERNEL_LOOP
#define CUDA_KERNEL_LOOP(i, n) \
@@ -130,4 +137,4 @@ __global__ void psamask_distribute_backward_cuda(
}
}
#endif
#endif // PSAMASK_CUDA_KERNEL_CUH
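The CUDA_KERNEL_LOOP macro whose definition is truncated above is the standard grid-stride loop idiom; the #ifndef guard presumably avoids a redefinition error when a helper header already supplies it. A minimal sketch of the idiom (scale_kernel is an illustrative example, not part of this commit):

#ifndef CUDA_KERNEL_LOOP
#define CUDA_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)
#endif

template <typename T>
__global__ void scale_kernel(const T* in, T* out, T alpha, int n) {
  // Each thread strides by the full grid width, so any launch size covers n.
  CUDA_KERNEL_LOOP(index, n) { out[index] = alpha * in[index]; }
}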
#include "pytorch_cpp_helper.hpp"
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2,
Tensor ious, const int mode,
const bool aligned, const int offset);
@@ -14,7 +14,7 @@ void bbox_overlaps_cuda(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
void bbox_overlaps(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
const int mode, const bool aligned, const int offset) {
if (bboxes1.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(bboxes1);
CHECK_CUDA_INPUT(bboxes2);
CHECK_CUDA_INPUT(ious);
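The WITH_CUDA -> MMCV_WITH_CUDA rename touches the same dispatch pattern in every pytorch binding below: both the CUDA launcher declaration and the CUDA branch of the dispatcher are fenced by the macro, so CPU-only builds still compile. Assembled from the fragments above into one sketch (the error message wording is illustrative):

#include "pytorch_cpp_helper.hpp"

#ifdef MMCV_WITH_CUDA
void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2,
                                    Tensor ious, const int mode,
                                    const bool aligned, const int offset);
#endif

void bbox_overlaps(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
                   const int mode, const bool aligned, const int offset) {
  if (bboxes1.device().is_cuda()) {
#ifdef MMCV_WITH_CUDA
    CHECK_CUDA_INPUT(bboxes1);
    CHECK_CUDA_INPUT(bboxes2);
    CHECK_CUDA_INPUT(ious);
    BBoxOverlapsCUDAKernelLauncher(bboxes1, bboxes2, ious, mode, aligned,
                                   offset);
#else
    AT_ERROR("bbox_overlaps is not compiled with GPU support");
#endif
  }
}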
...
#include "pytorch_cpp_helper.hpp"
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
void CARAFEForwardCUDAKernelLauncher(const Tensor features, const Tensor masks,
Tensor rfeatures, Tensor routput,
Tensor rmasks, Tensor output,
@@ -38,7 +38,7 @@ void carafe_forward(Tensor features, Tensor masks, Tensor rfeatures,
Tensor routput, Tensor rmasks, Tensor output,
int kernel_size, int group_size, int scale_factor) {
if (features.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(features);
CHECK_CUDA_INPUT(masks);
CHECK_CUDA_INPUT(rfeatures);
@@ -61,7 +61,7 @@ void carafe_backward(Tensor top_grad, Tensor rfeatures, Tensor masks,
Tensor mask_grad, int kernel_size, int group_size,
int scale_factor) {
if (top_grad.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(top_grad);
CHECK_CUDA_INPUT(rfeatures);
CHECK_CUDA_INPUT(masks);
...
#include "pytorch_cpp_helper.hpp"
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
void CARAFENAIVEForwardCUDAKernelLauncher(const Tensor features,
const Tensor masks, Tensor output,
const int kernel_size,
@@ -32,7 +32,7 @@ void carafe_naive_backward_cuda(Tensor top_grad, Tensor features, Tensor masks,
void carafe_naive_forward(Tensor features, Tensor masks, Tensor output,
int kernel_size, int group_size, int scale_factor) {
if (features.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(features);
CHECK_CUDA_INPUT(masks);
CHECK_CUDA_INPUT(output);
@@ -50,7 +50,7 @@ void carafe_naive_backward(Tensor top_grad, Tensor features, Tensor masks,
Tensor bottom_grad, Tensor mask_grad,
int kernel_size, int group_size, int scale_factor) {
if (top_grad.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(top_grad);
CHECK_CUDA_INPUT(features);
CHECK_CUDA_INPUT(masks);
...
#include "pytorch_cpp_helper.hpp"
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f, Tensor weight);
void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
@@ -33,7 +33,7 @@ void ca_map_backward_cuda(const Tensor dout, const Tensor weight,
void ca_forward(const Tensor t, const Tensor f, Tensor weight) {
if (t.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(t);
CHECK_CUDA_INPUT(f);
CHECK_CUDA_INPUT(weight);
@@ -49,7 +49,7 @@ void ca_forward(const Tensor t, const Tensor f, Tensor weight) {
void ca_backward(const Tensor dw, const Tensor t, const Tensor f, Tensor dt,
Tensor df) {
if (dw.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(dw);
CHECK_CUDA_INPUT(t);
CHECK_CUDA_INPUT(f);
@@ -66,7 +66,7 @@ void ca_backward(const Tensor dw, const Tensor t, const Tensor f, Tensor dt,
void ca_map_forward(const Tensor weight, const Tensor g, Tensor out) {
if (weight.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(weight);
CHECK_CUDA_INPUT(g);
CHECK_CUDA_INPUT(out);
@@ -82,7 +82,7 @@ void ca_map_forward(const Tensor weight, const Tensor g, Tensor out) {
void ca_map_backward(const Tensor dout, const Tensor weight, const Tensor g,
Tensor dw, Tensor dg) {
if (dout.device().is_cuda()) {
#ifdef WITH_CUDA
#ifdef MMCV_WITH_CUDA
CHECK_CUDA_INPUT(dout);
CHECK_CUDA_INPUT(weight);
CHECK_CUDA_INPUT(g);
...