Unverified Commit cb722cf9 authored by kahmed10, committed by GitHub

Enable read support for n-dimensional ops (#537)



* initial progress

* formatting

* add pooling changes

* formatting

* change eliminate_pad

* formatting

* rename var

* formatting

* update op shape test and compute

* formatting

* revert conv constructor

* formatting

* change initializer

* formatting

* fix tidy

* change quant conv and shape check

* add tests and fixes

* formatting

* fix type

* fix conv test

* formatting

* add pooling and bn tests

* formatting

* add inconsistent attr tests

* fix padding issue

* formatting

* fix review comments, remove duplicate test

* formatting

* fix variable

* fix assert bug

* fix attr check

* remove std
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 93be5e2b
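This change lets the ONNX reader accept Conv, pooling, and BatchNormalization operators whose spatial attributes are 1-D or 3-D rather than hard-coded 2-D, and makes the shape computation reject attribute vectors of inconsistent rank. The test diffs below build small ONNX models in Python and verify the parsed MIGraphX programs in C++. As a minimal sketch of the same round trip outside the test harness (assuming the onnx and migraphx Python packages are installed; the file name conv_1d_example.onnx is illustrative):

    import onnx
    from onnx import helper, TensorProto

    # Build a 1-D Conv model matching the conv_1d_test fixture below.
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    w = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 3])
    node = helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
    graph = helper.make_graph([node], 'conv_1d_example', [x, w], [out])
    onnx.save(helper.make_model(graph), 'conv_1d_example.onnx')

    # With this change, MIGraphX can read the 1-D (and 3-D) model directly.
    import migraphx  # assumes the MIGraphX Python bindings are built
    prog = migraphx.parse_onnx('conv_1d_example.onnx')
    print(prog)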
@@ -185,6 +185,33 @@ def atanh_test():
    return ([node], [x], [y])


@onnx_test
def averagepool_1d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    out = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['0'],
                                 outputs=['1'],
                                 kernel_shape=[3])

    return ([node], [x], [out])


@onnx_test
def averagepool_3d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
    out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                        [1, 3, 3, 3, 3])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['0'],
                                 outputs=['1'],
                                 kernel_shape=[3, 3, 3])

    return ([node], [x], [out])


@onnx_test
def averagepool_notset_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
@@ -229,6 +256,43 @@ def averagepool_same_upper_test():
    return ([node], [x], [y])


@onnx_test
def batchnorm_1d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
    mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
    var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
    out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])

    node = onnx.helper.make_node('BatchNormalization',
                                 inputs=['0', '1', '2', '3', '4'],
                                 outputs=['5'],
                                 epsilon=1e-6,
                                 momentum=0.9)

    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def batchnorm_3d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
    mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
    var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
    out = helper.make_tensor_value_info('5', TensorProto.FLOAT,
                                        [1, 3, 5, 5, 5])

    node = onnx.helper.make_node('BatchNormalization',
                                 inputs=['0', '1', '2', '3', '4'],
                                 outputs=['5'],
                                 epsilon=1e-6,
                                 momentum=0.9)

    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def cast_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
@@ -511,6 +575,43 @@ def const_of_shape_no_value_attr_test():
    return ([shape_const, node], [], [y])


@onnx_test
def conv_1d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 3])

    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_3d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, 3, 3, 3])

    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_attr_fail_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 3])

    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 strides=[1, 1],
                                 outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_autopad_fail_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
@@ -156,6 +156,26 @@ TEST_CASE(atanh_test)
    EXPECT(p == prog);
}

TEST_CASE(averagepool_1d_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
    p.add_instruction(migraphx::op::pooling{"average", {0}, {1}, {3}}, l0);
    auto prog = optimize_onnx("averagepool_1d_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(averagepool_3d_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
    p.add_instruction(migraphx::op::pooling{"average", {0, 0, 0}, {1, 1, 1}, {3, 3, 3}}, l0);
    auto prog = optimize_onnx("averagepool_3d_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(averagepool_notset_test)
{
    migraphx::program p;
@@ -201,6 +221,34 @@ TEST_CASE(averagepool_same_upper_test)
    EXPECT(p == prog);
}

TEST_CASE(batchnorm_1d_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
    auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {3}});
    auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {3}});
    auto l3 = p.add_parameter("3", {migraphx::shape::float_type, {3}});
    auto l4 = p.add_parameter("4", {migraphx::shape::float_type, {3}});
    p.add_instruction(migraphx::op::batch_norm_inference{}, l0, l1, l2, l3, l4);
    auto prog = optimize_onnx("batchnorm_1d_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(batchnorm_3d_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
    auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {3}});
    auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {3}});
    auto l3 = p.add_parameter("3", {migraphx::shape::float_type, {3}});
    auto l4 = p.add_parameter("4", {migraphx::shape::float_type, {3}});
    p.add_instruction(migraphx::op::batch_norm_inference{}, l0, l1, l2, l3, l4);
    auto prog = optimize_onnx("batchnorm_3d_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(cast_test)
{
    migraphx::program p;
@@ -384,6 +432,33 @@ TEST_CASE(conv_autopad_fail_test)
    EXPECT(test::throws([&] { optimize_onnx("conv_autopad_fail_test.onnx"); }));
}

TEST_CASE(conv_1d_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
    auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 3}});
    p.add_instruction(migraphx::op::convolution{{0}, {1}, {1}}, l0, l1);
    auto prog = optimize_onnx("conv_1d_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(conv_3d_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
    auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3, 3}});
    p.add_instruction(migraphx::op::convolution{{0, 0, 0}, {1, 1, 1}, {1, 1, 1}}, l0, l1);
    auto prog = optimize_onnx("conv_3d_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(conv_attr_fail_test)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("conv_attr_fail_test.onnx"); }));
}

TEST_CASE(conv_autopad_same_test)
{
    migraphx::program p;
@@ -74,6 +74,19 @@ TEST_CASE(convolution_shape)
    migraphx::shape weights2{migraphx::shape::float_type, {3, 3}};
    throws_shape(migraphx::op::convolution{}, input2, weights2);
    throws_shape(migraphx::op::convolution{}, input2, weights);

    migraphx::shape output_1d{migraphx::shape::float_type, {4, 4, 1}};
    migraphx::shape input_1d{migraphx::shape::float_type, {4, 3, 3}};
    migraphx::shape weights_1d{migraphx::shape::float_type, {4, 3, 3}};
    expect_shape(output_1d, migraphx::op::convolution{{0}, {1}, {1}}, input_1d, weights_1d);

    migraphx::shape output_3d{migraphx::shape::float_type, {4, 4, 1, 1, 1}};
    migraphx::shape input_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
    migraphx::shape weights_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
    expect_shape(output_3d,
                 migraphx::op::convolution{{0, 0, 0}, {1, 1, 1}, {1, 1, 1}},
                 input_3d,
                 weights_3d);
}

TEST_CASE(quant_convolution_shape)
@@ -96,6 +109,14 @@ TEST_CASE(quant_convolution_shape)
    throws_shape(migraphx::op::quant_convolution{}, input3, weight3);
}

TEST_CASE(inconsistent_attr_shape)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape weights{migraphx::shape::float_type, {4, 3, 3, 3}};
    throws_shape(migraphx::op::convolution{{1, 1}, {2}, {3, 3, 3}}, input, weights);
    throws_shape(migraphx::op::pooling{"max", {1}, {0}, {1, 1}}, input);
}

TEST_CASE(transpose_shape)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 2}};
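The inconsistent_attr_shape case is the shape-level counterpart of conv_attr_fail_test: padding, stride, and kernel attribute vectors whose ranks disagree with each other or with the input's spatial rank now throw instead of silently misbehaving. A minimal sketch of tripping the same check through the ONNX parser (hypothetical file name; assumes the onnx and migraphx Python packages, and that the parse error surfaces as a Python exception):

    import onnx
    from onnx import helper, TensorProto
    import migraphx

    # 1-D input and weights, but 2-D strides: the attribute ranks are inconsistent.
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    w = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 3])
    node = helper.make_node('Conv', inputs=['0', '1'], outputs=['2'], strides=[1, 1])
    graph = helper.make_graph([node], 'conv_attr_fail_example', [x, w], [out])
    onnx.save(helper.make_model(graph), 'conv_attr_fail_example.onnx')

    try:
        migraphx.parse_onnx('conv_attr_fail_example.onnx')
    except RuntimeError as e:  # assumed: MIGraphX parse errors map to RuntimeError
        print('rejected as expected:', e)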