Commit 144e7567 authored by dreamerlin

use pytest.mark.parametrize

parent 86d9f468
-from collections import OrderedDict
-from itertools import product
 from unittest.mock import patch
+import pytest
 import torch
 import torch.nn as nn
@@ -10,30 +9,38 @@ from mmcv.cnn.bricks import (Conv2d, ConvTranspose2d, ConvTranspose3d, Linear,
 @patch('torch.__version__', '1.1')
-def test_conv2d():
+@pytest.mark.parametrize(
+    'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
+    [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
+def test_conv2d(in_w, in_h, in_channel, out_channel, kernel_size, stride,
+                padding, dilation):
     """
     CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv2d
     """
-    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
-                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
-                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
-                              ('padding', [0, 1]), ('dilation', [1, 2])])
     # train mode
-    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
-            *list(test_cases.values())):
-        # wrapper op with 0-dim input
-        x_empty = torch.randn(0, in_cha, in_h, in_w)
-        torch.manual_seed(0)
-        wrapper = Conv2d(in_cha, out_cha, k, stride=s, padding=p, dilation=d)
-        wrapper_out = wrapper(x_empty)
-        # torch op with 3-dim input as shape reference
-        x_normal = torch.randn(3, in_cha, in_h, in_w).requires_grad_(True)
-        torch.manual_seed(0)
-        ref = nn.Conv2d(in_cha, out_cha, k, stride=s, padding=p, dilation=d)
-        ref_out = ref(x_normal)
-        assert wrapper_out.shape[0] == 0
+    # wrapper op with 0-dim input
+    x_empty = torch.randn(0, in_channel, in_h, in_w)
+    torch.manual_seed(0)
+    wrapper = Conv2d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation)
+    wrapper_out = wrapper(x_empty)
+    # torch op with 3-dim input as shape reference
+    x_normal = torch.randn(3, in_channel, in_h, in_w).requires_grad_(True)
+    torch.manual_seed(0)
+    ref = nn.Conv2d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation)
+    ref_out = ref(x_normal)
+    assert wrapper_out.shape[0] == 0
@@ -46,46 +53,49 @@ def test_conv2d():
-        assert torch.equal(wrapper(x_normal), ref_out)
-        # eval mode
-        x_empty = torch.randn(0, in_cha, in_h, in_w)
-        wrapper = Conv2d(in_cha, out_cha, k, stride=s, padding=p, dilation=d)
-        wrapper.eval()
-        wrapper(x_empty)
+    assert torch.equal(wrapper(x_normal), ref_out)
+    # eval mode
+    x_empty = torch.randn(0, in_channel, in_h, in_w)
+    wrapper = Conv2d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation)
+    wrapper.eval()
+    wrapper(x_empty)
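Note on the change above: the old OrderedDict + itertools.product sweep ran every combination of the eight parameters (2^8 = 256 cases per test), while the parametrize decorator pins two representative tuples. If exhaustive coverage were ever wanted again, stacking one parametrize decorator per argument rebuilds the cartesian product. A minimal sketch, not part of this commit (the test name is hypothetical):

import pytest


@pytest.mark.parametrize('kernel_size', [3, 5])
@pytest.mark.parametrize('stride', [1, 2])
@pytest.mark.parametrize('padding', [0, 1])
@pytest.mark.parametrize('dilation', [1, 2])
def test_grid_sketch(kernel_size, stride, padding, dilation):
    # pytest crosses stacked decorators, so this collects 2**4 = 16 test
    # cases, each reported under its own generated id.
    assert dilation * (kernel_size - 1) >= 0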
 @patch('torch.__version__', '1.1')
-def test_conv_transposed_2d():
-    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
-                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
-                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
-                              ('padding', [0, 1]), ('dilation', [1, 2])])
-    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
-            *list(test_cases.values())):
-        # wrapper op with 0-dim input
-        x_empty = torch.randn(0, in_cha, in_h, in_w, requires_grad=True)
-        # out padding must be smaller than either stride or dilation
-        op = min(s, d) - 1
-        torch.manual_seed(0)
-        wrapper = ConvTranspose2d(
-            in_cha,
-            out_cha,
-            k,
-            stride=s,
-            padding=p,
-            dilation=d,
-            output_padding=op)
-        wrapper_out = wrapper(x_empty)
-        # torch op with 3-dim input as shape reference
-        x_normal = torch.randn(3, in_cha, in_h, in_w)
-        torch.manual_seed(0)
-        ref = nn.ConvTranspose2d(
-            in_cha,
-            out_cha,
-            k,
-            stride=s,
-            padding=p,
-            dilation=d,
-            output_padding=op)
-        ref_out = ref(x_normal)
+@pytest.mark.parametrize(
+    'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
+    [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
+def test_conv_transposed_2d(in_w, in_h, in_channel, out_channel, kernel_size,
+                            stride, padding, dilation):
+    # wrapper op with 0-dim input
+    x_empty = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
+    # out padding must be smaller than either stride or dilation
+    op = min(stride, dilation) - 1
+    torch.manual_seed(0)
+    wrapper = ConvTranspose2d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation,
+        output_padding=op)
+    wrapper_out = wrapper(x_empty)
+    # torch op with 3-dim input as shape reference
+    x_normal = torch.randn(3, in_channel, in_h, in_w)
+    torch.manual_seed(0)
+    ref = nn.ConvTranspose2d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation,
+        output_padding=op)
+    ref_out = ref(x_normal)
@@ -99,48 +109,50 @@ def test_conv_transposed_2d():
-        assert torch.equal(wrapper(x_normal), ref_out)
-        # eval mode
-        x_empty = torch.randn(0, in_cha, in_h, in_w)
-        wrapper = ConvTranspose2d(
-            in_cha, out_cha, k, stride=s, padding=p, dilation=d, output_padding=op)
-        wrapper.eval()
-        wrapper(x_empty)
+    assert torch.equal(wrapper(x_normal), ref_out)
+    # eval mode
+    x_empty = torch.randn(0, in_channel, in_h, in_w)
+    wrapper = ConvTranspose2d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation,
+        output_padding=op)
+    wrapper.eval()
+    wrapper(x_empty)
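Why op = min(stride, dilation) - 1: PyTorch requires output_padding to be smaller than either stride or dilation, and taking the minimum minus one satisfies that for both. A quick standalone check, not part of the commit, using the documented ConvTranspose2d output-size formula and the second parameter tuple from the diff above:

import torch
import torch.nn as nn

in_h = in_w = 20
kernel_size, stride, padding, dilation = 5, 2, 1, 2
op = min(stride, dilation) - 1  # smaller than both stride and dilation

layer = nn.ConvTranspose2d(
    3, 3, kernel_size, stride=stride, padding=padding, dilation=dilation,
    output_padding=op)
out = layer(torch.randn(1, 3, in_h, in_w))

# H_out = (H_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1)
#         + output_padding + 1
expected = (in_h - 1) * stride - 2 * padding + dilation * (kernel_size - 1) \
    + op + 1
assert out.shape[-2:] == (expected, expected)  # 46 x 46 here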
 @patch('torch.__version__', '1.1')
-def test_conv_transposed_3d():
-    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
-                              ('in_t', [10, 20]), ('in_channel', [1, 3]),
-                              ('out_channel', [1, 3]), ('kernel_size', [3, 5]),
-                              ('stride', [1, 2]), ('padding', [0, 1]),
-                              ('dilation', [1, 2])])
-    for in_h, in_w, in_t, in_cha, out_cha, k, s, p, d in product(
-            *list(test_cases.values())):
-        # wrapper op with 0-dim input
-        x_empty = torch.randn(0, in_cha, in_t, in_h, in_w, requires_grad=True)
-        # out padding must be smaller than either stride or dilation
-        op = min(s, d) - 1
-        torch.manual_seed(0)
-        wrapper = ConvTranspose3d(
-            in_cha,
-            out_cha,
-            k,
-            stride=s,
-            padding=p,
-            dilation=d,
-            output_padding=op)
-        wrapper_out = wrapper(x_empty)
-        # torch op with 3-dim input as shape reference
-        x_normal = torch.randn(3, in_cha, in_t, in_h, in_w)
-        torch.manual_seed(0)
-        ref = nn.ConvTranspose3d(
-            in_cha,
-            out_cha,
-            k,
-            stride=s,
-            padding=p,
-            dilation=d,
-            output_padding=op)
-        ref_out = ref(x_normal)
+@pytest.mark.parametrize(
+    'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation',  # noqa: E501
+    [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
+def test_conv_transposed_3d(in_w, in_h, in_t, in_channel, out_channel,
+                            kernel_size, stride, padding, dilation):
+    # wrapper op with 0-dim input
+    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
+    # out padding must be smaller than either stride or dilation
+    op = min(stride, dilation) - 1
+    torch.manual_seed(0)
+    wrapper = ConvTranspose3d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation,
+        output_padding=op)
+    wrapper_out = wrapper(x_empty)
+    # torch op with 3-dim input as shape reference
+    x_normal = torch.randn(3, in_channel, in_t, in_h, in_w)
+    torch.manual_seed(0)
+    ref = nn.ConvTranspose3d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation,
+        output_padding=op)
+    ref_out = ref(x_normal)
@@ -154,30 +166,35 @@ def test_conv_transposed_3d():
-        assert torch.equal(wrapper(x_normal), ref_out)
-        # eval mode
-        x_empty = torch.randn(0, in_cha, in_t, in_h, in_w)
-        wrapper = ConvTranspose3d(
-            in_cha, out_cha, k, stride=s, padding=p, dilation=d, output_padding=op)
-        wrapper.eval()
-        wrapper(x_empty)
+    assert torch.equal(wrapper(x_normal), ref_out)
+    # eval mode
+    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
+    wrapper = ConvTranspose3d(
+        in_channel,
+        out_channel,
+        kernel_size,
+        stride=stride,
+        padding=padding,
+        dilation=dilation,
+        output_padding=op)
+    wrapper.eval()
+    wrapper(x_empty)
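For context, every test in this file patches torch.__version__ to '1.1' and feeds a zero-batch tensor: the mmcv wrappers exist because conv/pool ops on old torch versions cannot handle empty inputs. A rough sketch of the general fallback pattern such a wrapper can use; this is illustrative, not mmcv's actual implementation:

import torch
import torch.nn as nn


class EmptySafeConv2d(nn.Conv2d):
    """Illustrative only: compute the output shape by hand for empty input."""

    def forward(self, x):
        if x.numel() == 0:
            out_shape = [x.shape[0], self.out_channels]
            for d_in, k, p, s, d in zip(x.shape[2:], self.kernel_size,
                                        self.padding, self.stride,
                                        self.dilation):
                # standard conv output-size formula
                out_shape.append((d_in + 2 * p - d * (k - 1) - 1) // s + 1)
            return x.new_empty(out_shape)
        return super().forward(x)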
 @patch('torch.__version__', '1.1')
-def test_max_pool_2d():
-    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
-                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
-                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
-                              ('padding', [0, 1]), ('dilation', [1, 2])])
-    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
-            *list(test_cases.values())):
-        # wrapper op with 0-dim input
-        x_empty = torch.randn(0, in_cha, in_h, in_w, requires_grad=True)
-        wrapper = MaxPool2d(k, stride=s, padding=p, dilation=d)
-        wrapper_out = wrapper(x_empty)
-        # torch op with 3-dim input as shape reference
-        x_normal = torch.randn(3, in_cha, in_h, in_w)
-        ref = nn.MaxPool2d(k, stride=s, padding=p, dilation=d)
-        ref_out = ref(x_normal)
-        assert wrapper_out.shape[0] == 0
+@pytest.mark.parametrize(
+    'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
+    [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
+def test_max_pool_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride,
+                     padding, dilation):
+    # wrapper op with 0-dim input
+    x_empty = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
+    wrapper = MaxPool2d(
+        kernel_size, stride=stride, padding=padding, dilation=dilation)
+    wrapper_out = wrapper(x_empty)
+    # torch op with 3-dim input as shape reference
+    x_normal = torch.randn(3, in_channel, in_h, in_w)
+    ref = nn.MaxPool2d(
+        kernel_size, stride=stride, padding=padding, dilation=dilation)
+    ref_out = ref(x_normal)
+    assert wrapper_out.shape[0] == 0
@@ -187,23 +204,21 @@ def test_max_pool_2d():
 @patch('torch.__version__', '1.1')
-def test_max_pool_3d():
-    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
-                              ('in_t', [10, 20]), ('in_channel', [1, 3]),
-                              ('out_channel', [1, 3]), ('kernel_size', [3, 5]),
-                              ('stride', [1, 2]), ('padding', [0, 1]),
-                              ('dilation', [1, 2])])
-    for in_h, in_w, in_t, in_cha, out_cha, k, s, p, d in product(
-            *list(test_cases.values())):
-        # wrapper op with 0-dim input
-        x_empty = torch.randn(0, in_cha, in_t, in_h, in_w, requires_grad=True)
-        wrapper = MaxPool3d(k, stride=s, padding=p, dilation=d)
-        wrapper_out = wrapper(x_empty)
-        # torch op with 3-dim input as shape reference
-        x_normal = torch.randn(3, in_cha, in_t, in_h, in_w)
-        ref = nn.MaxPool3d(k, stride=s, padding=p, dilation=d)
-        ref_out = ref(x_normal)
-        assert wrapper_out.shape[0] == 0
+@pytest.mark.parametrize(
+    'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation',  # noqa: E501
+    [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
+def test_max_pool_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size,
+                     stride, padding, dilation):
+    # wrapper op with 0-dim input
+    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
+    wrapper = MaxPool3d(
+        kernel_size, stride=stride, padding=padding, dilation=dilation)
+    wrapper_out = wrapper(x_empty)
+    # torch op with 3-dim input as shape reference
+    x_normal = torch.randn(3, in_channel, in_t, in_h, in_w)
+    ref = nn.MaxPool3d(
+        kernel_size, stride=stride, padding=padding, dilation=dilation)
+    ref_out = ref(x_normal)
+    assert wrapper_out.shape[0] == 0
@@ -213,16 +228,9 @@ def test_max_pool_3d():
 @patch('torch.__version__', '1.1')
-def test_linear():
-    test_cases = OrderedDict([
-        ('in_w', [10, 20]),
-        ('in_h', [10, 20]),
-        ('in_feature', [1, 3]),
-        ('out_feature', [1, 3]),
-    ])
-    for in_h, in_w, in_feature, out_feature in product(
-            *list(test_cases.values())):
-        # wrapper op with 0-dim input
-        x_empty = torch.randn(0, in_feature, requires_grad=True)
-        torch.manual_seed(0)
+@pytest.mark.parametrize('in_w,in_h,in_feature,out_feature', [(10, 10, 1, 1),
+                                                              (20, 20, 3, 3)])
+def test_linear(in_w, in_h, in_feature, out_feature):
+    # wrapper op with 0-dim input
+    x_empty = torch.randn(0, in_feature, requires_grad=True)
+    torch.manual_seed(0)
...
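One practical payoff of the switch: each tuple now collects as a separate test with a value-derived id (e.g. test_conv2d[20-20-3-3-5-2-1-2], exact formatting may vary by pytest version), so a single failing combination can be rerun in isolation. pytest.param can attach friendlier ids; a sketch, not part of this commit (the test name and the 'small'/'large' ids are hypothetical):

import pytest


@pytest.mark.parametrize(
    'in_w,in_h,in_feature,out_feature',
    [
        pytest.param(10, 10, 1, 1, id='small'),
        pytest.param(20, 20, 3, 3, id='large'),
    ])
def test_linear_sketch(in_w, in_h, in_feature, out_feature):
    pass  # body identical to test_linear above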