Commit 2d192cc9 authored by charlie
Browse files

Formatting

parent 1decc5fc
......@@ -151,7 +151,7 @@ struct parse_pad : op_parser<parse_pad>
if(args.front()->get_shape().dynamic())
{
MIGRAPHX_THROW("PARSE_PAD: reflect padding with dynamic shape not supported");
}
}
return reflect_pad(info, pads, args.front());
}
if(mode != "constant")
......
......@@ -531,7 +531,7 @@ bool operator!=(const std::size_t& x, const shape::dynamic_dimension& y) { retur
// Offsets a dynamic dimension upward by a fixed amount: the shift is applied
// uniformly to min, max, and opt.
// NOTE(review): the original body contained the return statement twice (a
// stray duplicate); the second occurrence was unreachable and is removed.
shape::dynamic_dimension operator+(const shape::dynamic_dimension& x, const std::size_t& y)
{
    return {x.min + y, x.max + y, x.opt + y};
}
shape::dynamic_dimension operator+(const std::size_t& x, const shape::dynamic_dimension& y)
{
......@@ -539,11 +539,11 @@ shape::dynamic_dimension operator+(const std::size_t& x, const shape::dynamic_di
}
// Shrinks a dynamic dimension by a fixed amount: the offset is applied
// uniformly to min, max, and opt.
// NOTE(review): bounds are std::size_t, so a y larger than a bound would wrap
// around — presumably callers guarantee y <= x.min; confirm at call sites.
// The original body contained the return statement twice (a stray duplicate);
// the second occurrence was unreachable and is removed.
shape::dynamic_dimension operator-(const shape::dynamic_dimension& x, const std::size_t& y)
{
    return {x.min - y, x.max - y, x.opt - y};
}
// Subtracts a dynamic dimension from a fixed value. Subtraction reverses the
// ordering of an interval: the result's lower bound comes from y.max and its
// upper bound from y.min. The original {x - y.min, x - y.max, ...} produced an
// inverted range (min > max) whenever y.min < y.max.
// NOTE(review): x - y.opt is kept for consistency with the other operators —
// confirm how an unset opt value should behave here. As with operator-,
// std::size_t arithmetic wraps if any bound of y exceeds x.
shape::dynamic_dimension operator-(const std::size_t& x, const shape::dynamic_dimension& y)
{
    return {x - y.max, x - y.min, x - y.opt};
}
bool operator==(const shape& x, const shape& y)
......
......@@ -4172,6 +4172,7 @@ def pad_reflect_multiaxis_test():
return ([arg_pad, node], [x], [y])
@onnx_test
def pad_attr_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
......@@ -4184,6 +4185,7 @@ def pad_attr_dyn_test():
return ([node], [x], [y])
@onnx_test
def pad_cnst_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
......@@ -4199,12 +4201,11 @@ def pad_cnst_dyn_test():
outputs=['arg_pad'],
value=pad_tensor)
node = onnx.helper.make_node('Pad',
inputs=['0', 'arg_pad'],
outputs=['1'])
node = onnx.helper.make_node('Pad', inputs=['0', 'arg_pad'], outputs=['1'])
return ([arg_pad, node], [x], [y])
@onnx_test
def pad_dyn_reflect_error():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
......@@ -4218,6 +4219,7 @@ def pad_dyn_reflect_error():
return ([node], [x], [y])
@onnx_test
def pow_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
......
......@@ -4010,13 +4010,14 @@ TEST_CASE(pad_attr_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
auto x = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
auto ret = mm->add_instruction(migraphx::make_op("pad", {{"pads", {1, 1, 1, 1}}}), x);
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
auto prog = parse_onnx("pad_attr_dyn_test.onnx", options);
auto prog = parse_onnx("pad_attr_dyn_test.onnx", options);
EXPECT(p == prog);
}
......@@ -4024,14 +4025,15 @@ TEST_CASE(pad_cnst_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
auto x = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {4}}, {0, 2, 0, 1}});
auto ret = mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 2, 0, 1}}}), x);
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
auto prog = parse_onnx("pad_cnst_dyn_test.onnx", options);
auto prog = parse_onnx("pad_cnst_dyn_test.onnx", options);
EXPECT(p == prog);
}
......
......@@ -1667,36 +1667,32 @@ TEST_CASE(pad_shape0)
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 3, 3}};
migraphx::shape output{migraphx::shape::float_type, {2, 3, 5, 5}};
expect_shape(output,
migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}),
input);
expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
}
TEST_CASE(pad_shape1)
{
    // Asymmetric pads: {0,0,2,2} before + {0,0,1,1} after adds 3 to each of
    // the last two dimensions (3 -> 6). The duplicated expect_shape call
    // (old and new formatting of the same statement) is collapsed to one.
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 3, 3}};
    migraphx::shape output{migraphx::shape::float_type, {2, 3, 6, 6}};
    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 2, 2, 0, 0, 1, 1}}}), input);
}
TEST_CASE(pad_dyn_shape0)
{
    // Dynamic leading dimension {1, 4, 2}; padding the two trailing dims by 1
    // on each side moves their fixed range {3, 3} to {5, 5}.
    // NOTE(review): the block previously declared input/output twice (the old
    // single-line and new wrapped formatting both survived the diff), which is
    // a redefinition error; one copy of each declaration and call is kept.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 2}, {3, 3, 0}, {3, 3, 0}, {3, 3, 0}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{1, 4, 2}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
}
TEST_CASE(pad_dyn_shape1)
{
    // Same pads as pad_dyn_shape0 but the trailing dims are themselves dynamic
    // ({3, 5} with opt 5); each bound shifts by 2 to {5, 7}.
    // NOTE(review): the block previously declared input/output twice (old and
    // new formatting of the same statements), a redefinition error; one copy
    // of each declaration and of the expect_shape call is kept.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 2}, {3, 3, 0}, {3, 5, 5}, {3, 5, 5}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{1, 4, 2}, {3, 3, 0}, {5, 7, 7}, {5, 7, 7}}};
    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
}
TEST_CASE(pooling_shape0)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.