##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import math
import torch
from torch.autograd import Variable
from torch.nn import Module, Sequential
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _single, _pair, _triple
from ..parallel import my_data_parallel
from ..functions import view_each

__all__ = ['Module', 'Sequential', 'Conv1d', 'Conv2d', 'ConvTranspose2d',
           'ReLU', 'Sigmoid', 'MaxPool2d', 'AvgPool2d', 'AdaptiveAvgPool2d',
           'Dropout2d', 'Linear']


class _ConvNd(Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)
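
# A minimal sketch of how _ConvNd sizes and initializes its parameters; the
# concrete numbers below are illustrative, not part of this module:
#
#   >>> conv = Conv2d(16, 33, kernel_size=3)
#   >>> conv.weight.size()   # (out_channels, in_channels // groups, kH, kW)
#   torch.Size([33, 16, 3, 3])
#   >>> # reset_parameters draws from U(-stdv, stdv) with fan-in scaling:
#   >>> stdv = 1. / math.sqrt(16 * 3 * 3)
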
class Conv1d(_ConvNd):
    r"""Applies a 1D convolution over an input signal composed of several
    input planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{in}, L)` and output :math:`(N, C_{out}, L_{out})` can be
    precisely described as:

    .. math::

        \begin{array}{ll}
        out(N_i, C_{out_j}) = bias(C_{out_j})
            + \sum_{k=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k)
        \end{array}

    where :math:`\star` is the valid `cross-correlation`_ operator.

    | :attr:`stride` controls the stride for the cross-correlation.
    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides for :attr:`padding` number of points.
    | :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this
      `link`_ has a nice visualization of what :attr:`dilation` does.
    | :attr:`groups` controls the connections between inputs and outputs.
      `in_channels` and `out_channels` must both be divisible by `groups`.
    |   At groups=1, all inputs are convolved to all outputs.
    |   At groups=2, the operation becomes equivalent to having two conv
        layers side by side, each seeing half the input channels, and
        producing half the output channels, and both subsequently
        concatenated.
        At groups=`in_channels`, each input channel is convolved with its
        own set of filters (of size `out_channels // in_channels`).

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If True, adds a learnable bias to the output.
            Default: True

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` where
          :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel\_size - 1) - 1) / stride + 1)`

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (out_channels, in_channels, kernel_size)
        bias (Tensor): the learnable bias of the module of shape
            (out_channels)

    Examples::

        >>> m = nn.Conv1d(16, 33, 3, stride=2)
        >>> input = autograd.Variable(torch.randn(20, 16, 50))
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        super(Conv1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias)

    def forward(self, input):
        return F.conv1d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
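
# A quick check of the Conv1d shape formula above (sizes are illustrative):
#
#   >>> m = Conv1d(16, 33, 3, stride=2)
#   >>> m(Variable(torch.randn(20, 16, 50))).size()
#   torch.Size([20, 33, 24])   # L_out = floor((50 - 1*(3-1) - 1)/2 + 1) = 24
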
class Conv2d(_ConvNd):
    r"""Applies a 2D convolution over an input signal composed of several
    input planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{in}, H, W)` and output
    :math:`(N, C_{out}, H_{out}, W_{out})` can be precisely described as:

    .. math::

        \begin{array}{ll}
        out(N_i, C_{out_j}) = bias(C_{out_j})
            + \sum_{k=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k)
        \end{array}

    where :math:`\star` is the valid 2D `cross-correlation`_ operator.

    | :attr:`stride` controls the stride for the cross-correlation.
    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides for :attr:`padding` number of points.
    | :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this
      `link`_ has a nice visualization of what :attr:`dilation` does.
    | :attr:`groups` controls the connections between inputs and outputs.
      `in_channels` and `out_channels` must both be divisible by `groups`.
    |   At groups=1, all inputs are convolved to all outputs.
    |   At groups=2, the operation becomes equivalent to having two conv
        layers side by side, each seeing half the input channels, and
        producing half the output channels, and both subsequently
        concatenated.
        At groups=`in_channels`, each input channel is convolved with its
        own set of filters (of size `out_channels // in_channels`).

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
    :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the
          height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used
          for the height dimension, and the second `int` for the width
          dimension

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If True, adds a learnable bias to the output.
            Default: True

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
          :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)`
          :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)`

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (out_channels, in_channels, kernel_size[0], kernel_size[1])
        bias (Tensor): the learnable bias of the module of shape
            (out_channels)

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias)

    def forward(self, input):
        if isinstance(input, Variable):
            return F.conv2d(input, self.weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
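
# Unlike the stock torch.nn modules, Conv2d.forward above also accepts a
# tuple/list of Variables (one chunk per device) and routes it through
# my_data_parallel. A hedged sketch, assuming the inputs are already
# scattered across GPUs and that my_data_parallel returns one output per
# input chunk:
#
#   >>> m = Conv2d(16, 33, 3, stride=2).cuda()
#   >>> x0 = Variable(torch.randn(10, 16, 50, 100)).cuda(0)  # illustrative
#   >>> x1 = Variable(torch.randn(10, 16, 50, 100)).cuda(1)  # illustrative
#   >>> y0, y1 = m((x0, x1))   # dispatched via my_data_parallel
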
class _ConvTransposeMixin(object):

    def forward(self, input, output_size=None):
        output_padding = self._output_padding(input, output_size)
        func = self._backend.ConvNd(
            self.stride, self.padding, self.dilation, self.transposed,
            output_padding, self.groups)
        if self.bias is None:
            return func(input, self.weight)
        else:
            return func(input, self.weight, self.bias)

    def _output_padding(self, input, output_size):
        if output_size is None:
            return self.output_padding

        output_size = list(output_size)
        k = input.dim() - 2
        if len(output_size) == k + 2:
            output_size = output_size[-2:]
        if len(output_size) != k:
            raise ValueError(
                "output_size must have {} or {} elements (got {})"
                .format(k, k + 2, len(output_size)))

        def dim_size(d):
            return ((input.size(d + 2) - 1) * self.stride[d] -
                    2 * self.padding[d] + self.kernel_size[d])

        min_sizes = [dim_size(d) for d in range(k)]
        max_sizes = [min_sizes[d] + self.stride[d] - 1 for d in range(k)]
        for size, min_size, max_size in zip(output_size, min_sizes, max_sizes):
            if size < min_size or size > max_size:
                raise ValueError((
                    "requested an output size of {}, but valid sizes range "
                    "from {} to {} (for an input of {})").format(
                        output_size, min_sizes, max_sizes, input.size()[2:]))

        return tuple([output_size[d] - min_sizes[d] for d in range(k)])
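
# Worked example of the _output_padding arithmetic above (numbers are
# illustrative): with an input of H=6, stride=2, padding=1 and kernel_size=3,
# the minimum output size is (6 - 1)*2 - 2*1 + 3 = 11 and the maximum is
# 11 + (stride - 1) = 12, so requesting output_size=12 yields an
# output_padding of 12 - 11 = 1.
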
class ConvTranspose2d(_ConvTransposeMixin, _ConvNd):
    r"""Applies a 2D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv2d with respect to its
    input. It is also known as a fractionally-strided convolution or a
    deconvolution (although it is not an actual deconvolution operation).

    | :attr:`stride` controls the stride for the cross-correlation.
    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides for :attr:`padding` number of points.
    | If :attr:`output_padding` is non-zero, then the output is implicitly
      zero-padded on one side for :attr:`output_padding` number of points.
    | :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this
      `link`_ has a nice visualization of what :attr:`dilation` does.
    | :attr:`groups` controls the connections between inputs and outputs.
      `in_channels` and `out_channels` must both be divisible by `groups`.
    |   At groups=1, all inputs are convolved to all outputs.
    |   At groups=2, the operation becomes equivalent to having two conv
        layers side by side, each seeing half the input channels, and
        producing half the output channels, and both subsequently
        concatenated.
        At groups=`in_channels`, each input channel is convolved with its
        own set of filters (of size `out_channels // in_channels`).

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
    :attr:`output_padding` can either be:

        - a single ``int`` -- in which case the same value is used for the
          height and width dimensions
        - a ``tuple`` of two ints -- in which case, the first `int` is used
          for the height dimension, and the second `int` for the width
          dimension

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        output_padding (int or tuple, optional): Zero-padding added to one
            side of the output. Default: 0
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If True, adds a learnable bias to the output.
            Default: True
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
          :math:`H_{out} = (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0] + output\_padding[0]`
          :math:`W_{out} = (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1] + output\_padding[1]`

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (in_channels, out_channels, kernel_size[0], kernel_size[1])
        bias (Tensor): the learnable bias of the module of shape
            (out_channels)

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
        >>> output = m(input)
        >>> # exact output size can be also specified as an argument
        >>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        output_padding = _pair(output_padding)
        super(ConvTranspose2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias)

    def forward(self, input, output_size=None):
        if isinstance(input, Variable):
            # Resolve output_padding only on the Variable path: when
            # output_size is given, _output_padding inspects input.dim(),
            # which a tuple/list of inputs does not have.
            output_padding = self._output_padding(input, output_size)
            return F.conv_transpose2d(
                input, self.weight, self.bias, self.stride, self.padding,
                output_padding, self.groups, self.dilation)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
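
# Sanity check of the ConvTranspose2d shape formula (sizes are illustrative):
#
#   >>> m = ConvTranspose2d(16, 33, 3, stride=2)
#   >>> m(Variable(torch.randn(20, 16, 50, 100))).size()
#   torch.Size([20, 33, 101, 201])   # H_out = (50-1)*2 + 3 = 101
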
class Threshold(Module):

    def __init__(self, threshold, value, inplace=False):
        super(Threshold, self).__init__()
        self.threshold = threshold
        self.value = value
        self.inplace = inplace

    def forward(self, input):
        if isinstance(input, Variable):
            return F.threshold(input, self.threshold, self.value,
                               self.inplace)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + ' (' \
            + str(self.threshold) \
            + ', ' + str(self.value) \
            + inplace_str + ')'


class ReLU(Threshold):
    r"""Applies the rectified linear unit function element-wise
    :math:`{ReLU}(x) = max(0, x)`

    Args:
        inplace: can optionally do the operation in-place. Default: False

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    Examples::

        >>> m = nn.ReLU()
        >>> input = autograd.Variable(torch.randn(2))
        >>> print(input)
        >>> print(m(input))
    """

    def __init__(self, inplace=False):
        super(ReLU, self).__init__(0, 0, inplace)

    def __repr__(self):
        inplace_str = 'inplace' if self.inplace else ''
        return self.__class__.__name__ + ' (' \
            + inplace_str + ')'


class Sigmoid(Module):
    r"""Applies the element-wise function :math:`f(x) = 1 / (1 + exp(-x))`

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    Examples::

        >>> m = nn.Sigmoid()
        >>> input = autograd.Variable(torch.randn(2))
        >>> print(input)
        >>> print(m(input))
    """

    def forward(self, input):
        if isinstance(input, Variable):
            return torch.sigmoid(input)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')

    def __repr__(self):
        return self.__class__.__name__ + ' ()'
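
# Note that ReLU above is simply Threshold(0, 0): entries at or below the
# threshold 0 are replaced by the value 0, which is exactly max(0, x).
# For example (values illustrative):
#
#   >>> ReLU()(Variable(torch.Tensor([-1.0, 2.0])))   # -> [0.0, 2.0]
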
class MaxPool2d(Module):
    r"""Applies a 2D max pooling over an input signal composed of several
    input planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C, H, W)`, output :math:`(N, C, H_{out}, W_{out})` and
    :attr:`kernel_size` :math:`(kH, kW)` can be precisely described as:

    .. math::

        \begin{array}{ll}
        out(N_i, C_j, h, w) = \max_{m=0}^{kH-1} \max_{n=0}^{kW-1}
            input(N_i, C_j, stride[0] * h + m, stride[1] * w + n)
        \end{array}

    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides for :attr:`padding` number of points.
    | :attr:`dilation` controls the spacing between the kernel points. It is
      harder to describe, but this `link`_ has a nice visualization of what
      :attr:`dilation` does.

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
    :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the
          height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used
          for the height dimension, and the second `int` for the width
          dimension

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        dilation: a parameter that controls the stride of elements in the
            window
        return_indices: if True, will return the max indices along with the
            outputs. Useful when Unpooling later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute
            the output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)`
          :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)`

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
        >>> input = autograd.Variable(torch.randn(20, 16, 50, 32))
        >>> output = m(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False):
        super(MaxPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation
        self.return_indices = return_indices
        self.ceil_mode = ceil_mode

    def forward(self, input):
        if isinstance(input, Variable):
            return F.max_pool2d(input, self.kernel_size, self.stride,
                                self.padding, self.dilation, self.ceil_mode,
                                self.return_indices)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')

    def __repr__(self):
        kh, kw = _pair(self.kernel_size)
        dh, dw = _pair(self.stride)
        padh, padw = _pair(self.padding)
        dilh, dilw = _pair(self.dilation)
        # Report padding when it is non-zero in either dimension, and
        # dilation when it differs from the default of 1 in either dimension.
        padding_str = (', padding=(' + str(padh) + ', ' + str(padw) + ')'
                       if padh != 0 or padw != 0 else '')
        dilation_str = (', dilation=(' + str(dilh) + ', ' + str(dilw) + ')'
                        if dilh != 1 or dilw != 1 else '')
        return self.__class__.__name__ + ' (' \
            + 'size=(' + str(kh) + ', ' + str(kw) + ')' \
            + ', stride=(' + str(dh) + ', ' + str(dw) + ')' \
            + padding_str + dilation_str + ')'
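
# Quick check of the MaxPool2d shape formula (sizes are illustrative):
#
#   >>> m = MaxPool2d(3, stride=2)
#   >>> m(Variable(torch.randn(20, 16, 50, 32))).size()
#   torch.Size([20, 16, 24, 15])   # floor((50-3)/2+1)=24, floor((32-3)/2+1)=15
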
class AvgPool2d(Module):
    r"""Applies a 2D average pooling over an input signal composed of several
    input planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C, H, W)`, output :math:`(N, C, H_{out}, W_{out})` and
    :attr:`kernel_size` :math:`(kH, kW)` can be precisely described as:

    .. math::

        \begin{array}{ll}
        out(N_i, C_j, h, w) = 1 / (kH * kW) * \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
            input(N_i, C_j, stride[0] * h + m, stride[1] * w + n)
        \end{array}

    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides for :attr:`padding` number of points.

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can
    either be:

        - a single ``int`` -- in which case the same value is used for the
          height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used
          for the height dimension, and the second `int` for the width
          dimension

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute
            the output shape
        count_include_pad: when True, will include the zero-padding in the
            averaging calculation

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = floor((H_{in} + 2 * padding[0] - kernel\_size[0]) / stride[0] + 1)`
          :math:`W_{out} = floor((W_{in} + 2 * padding[1] - kernel\_size[1]) / stride[1] + 1)`

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
        >>> input = autograd.Variable(torch.randn(20, 16, 50, 32))
        >>> output = m(input)
    """

    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True):
        super(AvgPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input):
        if isinstance(input, Variable):
            return F.avg_pool2d(input, self.kernel_size, self.stride,
                                self.padding, self.ceil_mode,
                                self.count_include_pad)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')


class AdaptiveAvgPool2d(Module):
    r"""Applies a 2D adaptive average pooling over an input signal composed
    of several input planes.

    The output is of size H x W, for any input size. The number of output
    features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form H x W.
            Can be a tuple (H, W) or a single number H for a square image
            H x H

    Examples::

        >>> # target output size of 5x7
        >>> m = nn.AdaptiveAvgPool2d((5, 7))
        >>> input = autograd.Variable(torch.randn(1, 64, 8, 9))
        >>> output = m(input)
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveAvgPool2d(7)
        >>> input = autograd.Variable(torch.randn(1, 64, 10, 9))
        >>> output = m(input)
    """

    def __init__(self, output_size):
        super(AdaptiveAvgPool2d, self).__init__()
        self.output_size = output_size

    def forward(self, input):
        if isinstance(input, Variable):
            return F.adaptive_avg_pool2d(input, self.output_size)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + 'output_size=' + str(self.output_size) + ')'
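
# AvgPool2d fixes the window size, while AdaptiveAvgPool2d derives the window
# from the requested output size, so any spatial input maps to the same
# output shape (sizes are illustrative):
#
#   >>> m = AdaptiveAvgPool2d((5, 7))
#   >>> m(Variable(torch.randn(1, 64, 8, 9))).size()
#   torch.Size([1, 64, 5, 7])
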
class Dropout2d(Module):
    r"""Randomly zeroes whole channels of the input tensor. The channels to
    zero out are randomized on every forward call.

    *Usually the input comes from Conv2d modules.*

    As described in the paper
    `Efficient Object Localization Using Convolutional Networks`_, if
    adjacent pixels within feature maps are strongly correlated (as is
    normally the case in early convolution layers) then iid dropout will not
    regularize the activations and will otherwise just result in an effective
    learning rate decrease. In this case, :func:`nn.Dropout2d` will help
    promote independence between feature maps and should be used instead.

    Args:
        p (float, optional): probability of an element to be zeroed.
        inplace (bool, optional): If set to True, will do this operation
            in-place

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples::

        >>> m = nn.Dropout2d(p=0.2)
        >>> input = autograd.Variable(torch.randn(20, 16, 32, 32))
        >>> output = m(input)

    .. _Efficient Object Localization Using Convolutional Networks:
        http://arxiv.org/abs/1411.4280
    """

    def __init__(self, p=0.5, inplace=False):
        super(Dropout2d, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p
        self.inplace = inplace
        self.drop = torch.nn.Dropout2d(p=p, inplace=inplace)

    def forward(self, input):
        if isinstance(input, Variable):
            return self.drop(input)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self.drop, input)
        else:
            raise RuntimeError('unknown input type')

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + ' (' \
            + 'p=' + str(self.p) \
            + inplace_str + ')'


class Linear(Module):
    r"""Applies a linear transformation to the incoming data:
    :math:`y = Ax + b`

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to False, the layer will not learn an additive bias.
            Default: True

    Shape:
        - Input: :math:`(N, *, in\_features)` where `*` means any number of
          additional dimensions
        - Output: :math:`(N, *, out\_features)` where all but the last
          dimension are the same shape as the input.

    Attributes:
        weight: the learnable weights of the module of shape
            (out_features x in_features)
        bias: the learnable bias of the module of shape (out_features)

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = autograd.Variable(torch.randn(128, 20))
        >>> output = m(input)
        >>> print(output.size())
    """

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        if isinstance(input, Variable):
            return F.linear(input, self.weight, self.bias)
        elif isinstance(input, (tuple, list)):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + str(self.in_features) + ' -> ' \
            + str(self.out_features) + ')'
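
# All modules in this file share one dispatch pattern: a single Variable runs
# the underlying functional op directly, while a tuple/list of Variables is
# routed through my_data_parallel. A hedged end-to-end sketch (layer sizes
# and the pre-scattered chunks x0/x1 are illustrative assumptions):
#
#   >>> net = Sequential(Conv2d(3, 16, 3, padding=1), ReLU(),
#   ...                  AdaptiveAvgPool2d(1))
#   >>> y = net(Variable(torch.randn(8, 3, 32, 32)))   # single-device path
#   >>> ys = net((x0, x1))   # multi-device path, one input chunk per GPU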