#!/usr/bin/env python
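"""Autograd functions for modulated deformable convolution (DCNv2) and
deformable RoI pooling, as thin wrappers over the compiled
``modulated_dcn_cuda`` extension."""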
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
from torch.autograd import Function

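# Compiled CUDA extension providing the forward/backward kernels for
# modulated deformable convolution and deformable PS-RoI pooling.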
from .. import modulated_dcn_cuda as _backend


class ModulatedDeformConvFunction(Function):

    def __init__(self, stride, padding, dilation=1, deformable_groups=1):
        # Legacy initializer; unused when the function is invoked through the
        # static `.apply` path, where forward() stores these on ctx instead.
        super(ModulatedDeformConvFunction, self).__init__()
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.deformable_groups = deformable_groups

    @staticmethod
    def forward(ctx,
                input,
                offset,
                mask,
                weight,
                bias,
                stride,
                padding,
                dilation=1,
                deformable_groups=1):
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.deformable_groups = deformable_groups
        if not input.is_cuda:
            raise NotImplementedError
        if weight.requires_grad or mask.requires_grad or offset.requires_grad \
                or input.requires_grad:
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new(
            *ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
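        # Scratch buffers reused by the im2col-based CUDA kernels.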
        ctx._bufs = [input.new(), input.new()]
        _backend.modulated_deform_conv_cuda_forward(
            input, weight, bias, ctx._bufs[0], offset, mask, output,
            ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride,
            ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
            ctx.deformable_groups)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        _backend.modulated_deform_conv_cuda_backward(
            input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1],
            grad_input, grad_weight, grad_bias, grad_offset, grad_mask,
            grad_output, weight.shape[2], weight.shape[3], ctx.stride,
            ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
            ctx.deformable_groups)

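        # One gradient per forward() input; the four non-tensor
        # hyper-parameters (stride, padding, dilation, deformable_groups)
        # receive None.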
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
                None, None, None, None)

    @staticmethod
    def _infer_shape(ctx, input, weight):
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
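        # Standard convolution output-size formula; e.g. height = 8,
        # padding = 1, dilation = 1, kernel_h = 3, stride = 1 gives
        # height_out = (8 + 2 - 3) // 1 + 1 = 8 (a "same"-size output).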
        height_out = (height + 2 * ctx.padding -
                      (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1
        width_out = (width + 2 * ctx.padding -
                     (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1
        return n, channels_out, height_out, width_out


class DeformRoIPoolingFunction(Function):

    @staticmethod
    def forward(ctx,
                data,
                rois,
                offset,
                spatial_scale,
                pooled_size,
                output_dim,
                no_trans,
                group_size=1,
                part_size=None,
                sample_per_part=4,
                trans_std=0.0):
        ctx.spatial_scale = spatial_scale
        ctx.pooled_size = pooled_size
        ctx.output_dim = output_dim
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        ctx.part_size = pooled_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std

        assert 0.0 <= ctx.trans_std <= 1.0
        if not data.is_cuda:
            raise NotImplementedError

        output = data.new(
            *DeformRoIPoolingFunction._infer_shape(ctx, data, rois))
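        # Per-bin sampling counts, kept so backward can average gradients.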
        output_count = data.new(
            *DeformRoIPoolingFunction._infer_shape(ctx, data, rois))
        _backend.deform_psroi_pooling_cuda_forward(
            data, rois, offset, output, output_count, ctx.no_trans,
            ctx.spatial_scale, ctx.output_dim, ctx.group_size, ctx.pooled_size,
            ctx.part_size, ctx.sample_per_part, ctx.trans_std)

        # NOTE: output_count is an intermediate buffer, not an input or
        # output of forward(), so save_for_backward() cannot be used for it;
        # the tensors are stashed on ctx directly instead.
        # if data.requires_grad or rois.requires_grad or offset.requires_grad:
        #     ctx.save_for_backward(data, rois, offset, output_count)
        ctx.data = data
        ctx.rois = rois
        ctx.offset = offset
        ctx.output_count = output_count

        return output

    @staticmethod
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError

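        # Tensors were stashed on ctx in forward(); see the note there.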
        # data, rois, offset, output_count = ctx.saved_tensors
        data = ctx.data
        rois = ctx.rois
        offset = ctx.offset
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_offset = torch.zeros_like(offset)

        _backend.deform_psroi_pooling_cuda_backward(
            grad_output, data, rois, offset, output_count, grad_input,
            grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.output_dim,
            ctx.group_size, ctx.pooled_size, ctx.part_size,
            ctx.sample_per_part, ctx.trans_std)
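        # rois get a zero gradient (they are not differentiated through);
        # the eight non-tensor arguments receive None.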
        return (grad_input, torch.zeros_like(rois), grad_offset, None, None,
                None, None, None, None, None, None)

    @staticmethod
    def _infer_shape(ctx, data, rois):
        n = rois.shape[0]
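        # e.g. 100 rois with output_dim=256, pooled_size=7 -> (100, 256, 7, 7)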
        return n, ctx.output_dim, ctx.pooled_size, ctx.pooled_size


modulated_deform_conv = ModulatedDeformConvFunction.apply
deform_roi_pooling = DeformRoIPoolingFunction.apply
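

if __name__ == '__main__':
    # Minimal smoke test -- a sketch, not part of the original module. It
    # assumes a CUDA device and the compiled modulated_dcn_cuda extension
    # are available. For a kh x kw kernel with deformable_groups=1, offset
    # carries 2 * kh * kw channels (an x/y pair per sampling location) and
    # mask carries kh * kw channels.
    n, c_in, c_out, h, w = 2, 16, 32, 8, 8
    kh = kw = 3
    x = torch.randn(n, c_in, h, w, device='cuda', requires_grad=True)
    weight = torch.randn(c_out, c_in, kh, kw, device='cuda',
                         requires_grad=True)
    bias = torch.zeros(c_out, device='cuda', requires_grad=True)
    offset = torch.zeros(n, 2 * kh * kw, h, w, device='cuda',
                         requires_grad=True)
    mask = torch.ones(n, kh * kw, h, w, device='cuda', requires_grad=True)
    # stride=1, padding=1 keeps the spatial size at h x w.
    out = modulated_deform_conv(x, offset, mask, weight, bias, 1, 1)
    out.sum().backward()
    print(out.shape)  # expected: torch.Size([2, 32, 8, 8])

    # deform_roi_pooling sketch: each roi row is assumed to be
    # (batch_index, x1, y1, x2, y2); with no_trans=True the offsets are
    # unused, so an empty tensor is passed.
    rois = torch.tensor([[0., 0., 0., 7., 7.]], device='cuda')
    pooled = deform_roi_pooling(x, rois, x.new_empty(0), 1.0, 7, c_in, True)
    print(pooled.shape)  # expected: torch.Size([1, 16, 7, 7])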