deform_conv.py
import torch
from torch.autograd import Function
from torch.nn.modules.utils import _pair

from .. import deform_conv_cuda
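# deform_conv_cuda is the compiled CUDA extension that provides the actual
# forward/backward deformable-convolution kernels called below.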


class DeformConvFunction(Function):

    @staticmethod
    def forward(ctx,
                input,
                offset,
                weight,
                stride=1,
                padding=0,
                dilation=1,
                deformable_groups=1,
                im2col_step=64):
        if input is not None and input.dim() != 4:
            raise ValueError(
                "Expected 4D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight)

        output = input.new(*DeformConvFunction._output_size(
            input, weight, ctx.padding, ctx.dilation, ctx.stride))

        ctx.bufs_ = [input.new(), input.new()]  # columns, ones

        if not input.is_cuda:
            raise NotImplementedError
        else:
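            # The CUDA kernels only support float32 tensors. The Variable
            # branch keeps compatibility with pre-0.4 PyTorch, where Variables
            # wrapped tensors instead of being tensors themselves.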
            if isinstance(input, torch.autograd.Variable):
                if not isinstance(input.data, torch.cuda.FloatTensor):
                    raise NotImplementedError
            else:
                if not isinstance(input, torch.cuda.FloatTensor):
                    raise NotImplementedError

            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col_step must divide batch size'
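            # im2col_step is the number of samples that share one unrolled
            # im2col buffer, presumably trading memory for fewer kernel
            # launches.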
            deform_conv_cuda.deform_conv_forward_cuda(
                input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1],
                weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0],
                ctx.padding[1], ctx.padding[0], ctx.dilation[1],
                ctx.dilation[0], ctx.deformable_groups, cur_im2col_step)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors

        grad_input = grad_offset = grad_weight = None

        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            if isinstance(grad_output, torch.autograd.Variable):
                if not isinstance(grad_output.data, torch.cuda.FloatTensor):
                    raise NotImplementedError
            else:
                if not isinstance(grad_output, torch.cuda.FloatTensor):
                    raise NotImplementedError

            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col_step must divide batch size'

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
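                # A single fused kernel call fills both grad_input and
                # grad_offset.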
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                deform_conv_cuda.deform_conv_backward_input_cuda(
                    input, offset, grad_output, grad_input,
                    grad_offset, weight, ctx.bufs_[0], weight.size(3),
                    weight.size(2), ctx.stride[1], ctx.stride[0],
                    ctx.padding[1], ctx.padding[0], ctx.dilation[1],
                    ctx.dilation[0], ctx.deformable_groups, cur_im2col_step)

            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                deform_conv_cuda.deform_conv_backward_parameters_cuda(
                    input, offset, grad_output,
                    grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3),
                    weight.size(2), ctx.stride[1], ctx.stride[0],
                    ctx.padding[1], ctx.padding[0], ctx.dilation[1],
                    ctx.dilation[0], ctx.deformable_groups, 1, cur_im2col_step)

        # One gradient per forward argument: the five Nones correspond to
        # stride, padding, dilation, deformable_groups and im2col_step.
        return (grad_input, grad_offset, grad_weight, None, None, None, None,
                None)

    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                "convolution input is too small (output would be {})".format(
                    'x'.join(map(str, output_size))))
        return output_size
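
    # Example: a 32x32 input with a 3x3 kernel, padding 1, dilation 1 and
    # stride 1 gives kernel = 1 * (3 - 1) + 1 = 3, so the spatial size is
    # (32 + 2 * 1 - 3) // 1 + 1 = 32, i.e. the input size is preserved.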


deform_conv = DeformConvFunction.apply
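
# Illustrative usage sketch, assuming the usual deformable-conv offset layout
# of 2 * deformable_groups * kH * kW channels (shapes here are examples):
#
#   x = torch.randn(2, 16, 32, 32, device='cuda')
#   weight = torch.randn(32, 16, 3, 3, device='cuda')  # (out_ch, in_ch, kH, kW)
#   offset = torch.randn(2, 2 * 1 * 3 * 3, 32, 32, device='cuda')
#   out = deform_conv(x, offset, weight, 1, 1, 1, 1, 64)  # -> (2, 32, 32, 32)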