Commit d1537169 authored by Khalique's avatar Khalique
Browse files

add pytorch tests

parent 7ea877a9
 conv-example:­
8
0
1
23"Conv*
dilations@@ *
strides@@  test_convZ
0




 Z
1




Z
2

b
3




B
......@@ -471,10 +471,6 @@ def const_of_shape_int64_test():
onnx.save(model_def, 'const_of_shape_int64_test.onnx')
def const_of_shape_no_value_attr_test():
tensor_val = onnx.helper.make_tensor(
'value',
onnx.TensorProto.INT64, [1],[10]
)
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(
name = 'shape_tensor',
......@@ -506,6 +502,187 @@ def const_of_shape_no_value_attr_test():
model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
onnx.save(model_def, 'const_of_shape_no_value_attr_test.onnx')
def conv_bias_test():
    """Emit conv_bias_test.onnx: a single Conv node that also takes a bias input.

    Input '0' is NCHW [1, 3, 32, 32] and weight '1' is [1, 3, 5, 5]
    (one output channel), so a valid 5x5 convolution produces 28x28 output.
    """
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
    z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
    # FIX: the declared output had 2 channels, but weight dim 0 (the output
    # channel count) is 1, so Conv produces [1, 1, 28, 28] — matching the
    # channel bookkeeping used by the sibling conv_* tests.
    out = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 28, 28])
    node = onnx.helper.make_node(
        'Conv',
        inputs=['0', '1', '2'],
        outputs=['3'],
        dilations=[1, 1],
        strides=[1, 1]
    )
    graph_def = helper.make_graph(
        [node],
        'test_conv',
        [x, y, z],
        [out],
    )
    model_def = helper.make_model(graph_def, producer_name='conv-example')
    onnx.save(model_def, 'conv_bias_test.onnx')
def conv_bn_relu_maxpool_test():
    """Emit conv_bn_relu_maxpool_test.onnx: Conv(+bias) -> BatchNormalization -> Relu -> MaxPool."""
    graph_inputs = [
        helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32]),  # data
        helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5]),    # conv weight
        helper.make_tensor_value_info('2', TensorProto.FLOAT, [1]),             # conv bias
        helper.make_tensor_value_info('3', TensorProto.FLOAT, [1]),             # bn scale
        helper.make_tensor_value_info('4', TensorProto.FLOAT, [1]),             # bn shift
        helper.make_tensor_value_info('5', TensorProto.FLOAT, [1]),             # bn mean
        helper.make_tensor_value_info('6', TensorProto.FLOAT, [1]),             # bn variance
    ]
    result = helper.make_tensor_value_info('10', TensorProto.FLOAT, [1, 1, 14, 14])
    conv = onnx.helper.make_node(
        'Conv',
        inputs=['0', '1', '2'],
        outputs=['7'],
        dilations=[1, 1],
        strides=[1, 1],
        pads=[0, 0, 0, 0])
    bn = onnx.helper.make_node(
        'BatchNormalization',
        inputs=['7', '3', '4', '5', '6'],
        outputs=['8'],
        epsilon=9.99999974737875e-06,
        momentum=0.899999976158142)
    relu = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])
    pool = onnx.helper.make_node(
        'MaxPool',
        inputs=['9'],
        outputs=['10'],
        pads=[0, 0, 0, 0],
        strides=[2, 2],
        kernel_shape=[2, 2])
    graph_def = helper.make_graph(
        [conv, bn, relu, pool],
        'test_conv_bn_relu',
        graph_inputs,
        [result],
    )
    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
    onnx.save(model_def, 'conv_bn_relu_maxpool_test.onnx')
def conv_relu_maxpool_test():
    """Emit conv_relu_maxpool_test.onnx: Conv(+bias) -> Relu -> MaxPool(2x2, stride 2)."""
    data = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    weight = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
    result = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 1, 14, 14])
    conv = onnx.helper.make_node(
        'Conv',
        inputs=['0', '1', '2'],
        outputs=['3'],
        dilations=[1, 1],
        strides=[1, 1],
        pads=[0, 0, 0, 0])
    relu = onnx.helper.make_node('Relu', inputs=['3'], outputs=['4'])
    pool = onnx.helper.make_node(
        'MaxPool',
        inputs=['4'],
        outputs=['5'],
        pads=[0, 0, 0, 0],
        strides=[2, 2],
        kernel_shape=[2, 2])
    graph_def = helper.make_graph(
        [conv, relu, pool],
        'test_conv_relu',
        [data, weight, bias],
        [result],
    )
    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
    onnx.save(model_def, 'conv_relu_maxpool_test.onnx')
def conv_relu_maxpool_x2_test():
    """Emit conv_relu_maxpool_x2_test.onnx: two stacked Conv(+bias) -> Relu -> MaxPool stages."""
    data = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    weight1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 3, 5, 5])
    bias1 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5])
    weight2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 5, 5, 5])
    bias2 = helper.make_tensor_value_info('4', TensorProto.FLOAT, [1])
    # 32 -> conv5 -> 28 -> pool2 -> 14 -> conv5 -> 10 -> pool2 -> 5
    result = helper.make_tensor_value_info('10', TensorProto.FLOAT, [1, 1, 5, 5])
    conv1 = onnx.helper.make_node(
        'Conv',
        inputs=['0', '1', '2'],
        outputs=['5'],
        dilations=[1, 1],
        strides=[1, 1],
        pads=[0, 0, 0, 0])
    relu1 = onnx.helper.make_node('Relu', inputs=['5'], outputs=['6'])
    pool1 = onnx.helper.make_node(
        'MaxPool',
        inputs=['6'],
        outputs=['7'],
        pads=[0, 0, 0, 0],
        strides=[2, 2],
        kernel_shape=[2, 2])
    conv2 = onnx.helper.make_node(
        'Conv',
        inputs=['7', '3', '4'],
        outputs=['8'],
        dilations=[1, 1],
        strides=[1, 1],
        pads=[0, 0, 0, 0])
    relu2 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])
    pool2 = onnx.helper.make_node(
        'MaxPool',
        inputs=['9'],
        outputs=['10'],
        pads=[0, 0, 0, 0],
        strides=[2, 2],
        kernel_shape=[2, 2])
    graph_def = helper.make_graph(
        [conv1, relu1, pool1, conv2, relu2, pool2],
        'test_conv_relu2',
        [data, weight1, bias1, weight2, bias2],
        [result],
    )
    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
    onnx.save(model_def, 'conv_relu_maxpool_x2_test.onnx')
def cos_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
......@@ -1334,7 +1511,7 @@ def reducesum_multiaxis_test():
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims = 1
keepdims = 0
)
graph_def = helper.make_graph(
......
......@@ -228,6 +228,87 @@ TEST_CASE(const_of_shape_no_value_attr_test)
EXPECT(p == prog);
}
// Expected program for conv_bias_test.onnx: the parser lowers a Conv node
// with a bias input to convolution + broadcast(bias, channel axis) + add.
TEST_CASE(conv_bias_test)
{
    migraphx::program p;
    auto x    = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
    auto w    = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
    auto bias = p.add_parameter("2", {migraphx::shape::float_type, {1}});
    auto conv = p.add_instruction(migraphx::op::convolution{}, x, w);
    uint64_t channel_axis = 1;
    auto bcast = p.add_instruction(migraphx::op::broadcast{channel_axis, conv->get_shape().lens()}, bias);
    p.add_instruction(migraphx::op::add{}, conv, bcast);
    EXPECT(p == migraphx::parse_onnx("conv_bias_test.onnx"));
}
// Expected program for conv_bn_relu_maxpool_test.onnx:
// Conv(+bias) -> batch_norm_inference -> relu -> max pooling (2x2, stride 2).
TEST_CASE(conv_bn_relu_maxpool_test)
{
    migraphx::program p;
    auto x     = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
    auto w     = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
    auto bias  = p.add_parameter("2", {migraphx::shape::float_type, {1}});
    auto scale = p.add_parameter("3", {migraphx::shape::float_type, {1}});
    auto shift = p.add_parameter("4", {migraphx::shape::float_type, {1}});
    auto mean  = p.add_parameter("5", {migraphx::shape::float_type, {1}});
    auto var   = p.add_parameter("6", {migraphx::shape::float_type, {1}});
    auto conv  = p.add_instruction(migraphx::op::convolution{}, x, w);
    uint64_t channel_axis = 1;
    auto bcast  = p.add_instruction(migraphx::op::broadcast{channel_axis, conv->get_shape().lens()}, bias);
    auto biased = p.add_instruction(migraphx::op::add{}, conv, bcast);
    auto bn     = p.add_instruction(migraphx::op::batch_norm_inference{1.0e-5f}, biased, scale, shift, mean, var);
    auto act    = p.add_instruction(migraphx::op::relu{}, bn);
    p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, act);
    EXPECT(p == migraphx::parse_onnx("conv_bn_relu_maxpool_test.onnx"));
}
// Expected program for conv_relu_maxpool_test.onnx:
// Conv(+bias) -> relu -> max pooling (2x2, stride 2).
TEST_CASE(conv_relu_maxpool_test)
{
    migraphx::program p;
    auto x    = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
    auto w    = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
    auto bias = p.add_parameter("2", {migraphx::shape::float_type, {1}});
    auto conv = p.add_instruction(migraphx::op::convolution{}, x, w);
    uint64_t channel_axis = 1;
    auto bcast  = p.add_instruction(migraphx::op::broadcast{channel_axis, conv->get_shape().lens()}, bias);
    auto biased = p.add_instruction(migraphx::op::add{}, conv, bcast);
    auto act    = p.add_instruction(migraphx::op::relu{}, biased);
    p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, act);
    EXPECT(p == migraphx::parse_onnx("conv_relu_maxpool_test.onnx"));
}
// Expected program for conv_relu_maxpool_x2_test.onnx:
// two stacked Conv(+bias) -> relu -> max pooling stages.
TEST_CASE(conv_relu_maxpool_x2_test)
{
    migraphx::program p;
    uint64_t channel_axis = 1;
    auto x  = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
    auto w1 = p.add_parameter("1", {migraphx::shape::float_type, {5, 3, 5, 5}});
    auto b1 = p.add_parameter("2", {migraphx::shape::float_type, {5}});
    auto conv1  = p.add_instruction(migraphx::op::convolution{}, x, w1);
    auto bcast1 = p.add_instruction(migraphx::op::broadcast{channel_axis, conv1->get_shape().lens()}, b1);
    auto sum1   = p.add_instruction(migraphx::op::add{}, conv1, bcast1);
    auto act1   = p.add_instruction(migraphx::op::relu{}, sum1);
    auto pool1  = p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, act1);
    auto w2 = p.add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
    auto b2 = p.add_parameter("4", {migraphx::shape::float_type, {1}});
    auto conv2  = p.add_instruction(migraphx::op::convolution{}, pool1, w2);
    auto bcast2 = p.add_instruction(migraphx::op::broadcast{channel_axis, conv2->get_shape().lens()}, b2);
    auto sum2   = p.add_instruction(migraphx::op::add{}, conv2, bcast2);
    auto act2   = p.add_instruction(migraphx::op::relu{}, sum2);
    p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, act2);
    EXPECT(p == migraphx::parse_onnx("conv_relu_maxpool_x2_test.onnx"));
}
TEST_CASE(cos_test)
{
migraphx::program p;
......@@ -670,87 +751,6 @@ TEST_CASE(pow_test)
EXPECT(p == prog);
}
// Parses the pytorch-exported conv.onnx: a Conv node with a bias input is
// lowered to convolution + broadcast(bias along the channel axis) + add.
TEST_CASE(pytorch_conv_bias_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
uint64_t axis = 1; // channel axis for broadcasting the per-channel bias
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
p.add_instruction(migraphx::op::add{}, l3, l4);
auto prog = migraphx::parse_onnx("conv.onnx");
EXPECT(p == prog);
}
// Parses the pytorch-exported conv_bn_relu_maxpool.onnx:
// Conv(+bias) -> batch_norm_inference -> relu -> 2x2/stride-2 max pooling.
TEST_CASE(pytorch_conv_bn_relu_maxpool)
{
migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
auto p3 = p.add_parameter("3", {migraphx::shape::float_type, {1}}); // bn scale
auto p4 = p.add_parameter("4", {migraphx::shape::float_type, {1}}); // bn shift
auto p5 = p.add_parameter("5", {migraphx::shape::float_type, {1}}); // bn mean
auto p6 = p.add_parameter("6", {migraphx::shape::float_type, {1}}); // bn variance
uint64_t axis = 1; // channel axis for broadcasting the per-channel bias
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
auto l7 = p.add_instruction(migraphx::op::relu{}, l6);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
auto prog = migraphx::parse_onnx("conv_bn_relu_maxpool.onnx");
EXPECT(p == prog);
}
// Parses the pytorch-exported conv_relu_maxpool.onnx:
// Conv(+bias) -> relu -> 2x2/stride-2 max pooling.
TEST_CASE(pytorch_conv_relu_maxpool)
{
migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
uint64_t axis = 1; // channel axis for broadcasting the per-channel bias
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::relu{}, l5);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto prog = migraphx::parse_onnx("conv_relu_maxpool.onnx");
EXPECT(p == prog);
}
// Parses the pytorch-exported conv_relu_maxpoolX2.onnx:
// two stacked Conv(+bias) -> relu -> max pooling stages.
TEST_CASE(pytorch_conv_relu_maxpool_x2)
{
migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {5, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {5}});
uint64_t axis = 1; // channel axis for broadcasting the per-channel biases
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::relu{}, l5);
auto l7 = p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
// second stage: parameters "3"/"4" are the second conv's weight and bias
auto l8 = p.add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
auto l9 = p.add_parameter("4", {migraphx::shape::float_type, {1}});
auto l10 = p.add_instruction(migraphx::op::convolution{}, l7, l8);
auto l11 = p.add_instruction(migraphx::op::broadcast{axis, l10->get_shape().lens()}, l9);
auto l12 = p.add_instruction(migraphx::op::add{}, l10, l11);
auto l13 = p.add_instruction(migraphx::op::relu{}, l12);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
auto prog = migraphx::parse_onnx("conv_relu_maxpoolX2.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducemean_test)
{
migraphx::program p;
......
......@@ -69,7 +69,7 @@ def test_input():
def test_output():
p = migraphx.parse_onnx("conv_relu_maxpool.onnx")
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
p.compile(migraphx.get_target("gpu"))
r1 = run(p)
......
import migraphx
p = migraphx.parse_onnx("conv_relu_maxpool.onnx")
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
s1 = p.get_shape()
print("Compiling ...")
......
import migraphx
p = migraphx.parse_onnx("conv_relu_maxpool.onnx")
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
print("Compiling ...")
p.compile(migraphx.get_target("gpu"))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment