Commit cb265820 authored by charlie

Merge branch 'dyn_pad' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents b4b7f8c1 33be2634
@@ -59,18 +59,29 @@ struct pad
     std::string name() const { return "pad"; }
     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
-        auto&& idims = inputs.front().lens();
-        std::vector<std::size_t> rdims(idims.begin(), idims.end());
-        std::size_t num_dims = rdims.size();
-        for(std::size_t i = 0; i < num_dims; i++)
+        check_shapes{inputs, *this, true}.has(1);
+        const auto& s0 = inputs.front();
+        if(s0.dynamic())
         {
-            rdims[i] += pads[i] + pads[i + num_dims];
+            auto out_dyn_dims = s0.dyn_dims();
+            for(std::size_t i = 0; i < s0.ndim(); ++i)
+            {
+                out_dyn_dims[i] = out_dyn_dims[i] + pads[i] + pads[i + s0.ndim()];
+            }
+            return {s0.type(), out_dyn_dims};
         }
-        shape s{inputs.front().type(), rdims};
-        return s;
+        else
+        {
+            auto&& idims = s0.lens();
+            std::vector<std::size_t> rdims(idims.begin(), idims.end());
+            std::size_t num_dims = rdims.size();
+            for(std::size_t i = 0; i < num_dims; i++)
+            {
+                rdims[i] += pads[i] + pads[i + num_dims];
+            }
+            shape s{s0.type(), rdims};
+            return s;
+        }
     }

     std::size_t pad_ndims() const
......
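Note: in the dynamic branch each axis adds pads[i] + pads[i + ndim] to the whole dynamic_dimension, shifting min, max, and (when set) opt together. A worked example, using the {min, max, opt} values that appear in the tests further down:

    // 2-D dynamic input with dyn_dims {{2, 4, 2}, {2, 4, 2}}, pads = {1, 1, 1, 1}:
    //   axis 0: {2, 4, 2} + pads[0] + pads[2] -> {4, 6, 4}
    //   axis 1: {2, 4, 2} + pads[1] + pads[3] -> {4, 6, 4}
    // so compute_shape returns {float_type, {{4, 6, 4}, {4, 6, 4}}}.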
@@ -107,6 +107,11 @@ struct shape
         friend bool operator==(const std::size_t& x, const dynamic_dimension& y);
         friend bool operator!=(const dynamic_dimension& x, const std::size_t& y);
         friend bool operator!=(const std::size_t& x, const dynamic_dimension& y);
+
+        // add and subtract fixed std::size_t dimension
+        friend dynamic_dimension operator+(const dynamic_dimension& x, const std::size_t& y);
+        friend dynamic_dimension operator+(const std::size_t& x, const dynamic_dimension& y);
+        friend dynamic_dimension operator-(const dynamic_dimension& x, const std::size_t& y);
     };

     static const std::vector<type_t>& types();
......
@@ -147,7 +147,13 @@ struct parse_pad : op_parser<parse_pad>
         {
             auto mode = info.attributes.at("mode").s();
             if(mode == "reflect")
+            {
+                if(args.front()->get_shape().dynamic())
+                {
+                    MIGRAPHX_THROW("PARSE_PAD: reflect padding with dynamic shape not supported");
+                }
                 return reflect_pad(info, pads, args.front());
+            }
             if(mode != "constant")
             {
                 MIGRAPHX_THROW(
......
@@ -529,6 +529,29 @@ bool operator==(const std::size_t& x, const shape::dynamic_dimension& y) { retur
 bool operator!=(const shape::dynamic_dimension& x, const std::size_t& y) { return not(x == y); }
 bool operator!=(const std::size_t& x, const shape::dynamic_dimension& y) { return not(x == y); }

+shape::dynamic_dimension operator+(const shape::dynamic_dimension& x, const std::size_t& y)
+{
+    return {x.min + y, x.max + y, x.opt == 0 ? 0 : x.opt + y};
+}
+
+shape::dynamic_dimension operator+(const std::size_t& x, const shape::dynamic_dimension& y)
+{
+    return y + x;
+}
+
+shape::dynamic_dimension operator-(const shape::dynamic_dimension& x, const std::size_t& y)
+{
+    assert(x.min >= y);
+    assert(x.max >= y);
+    if(x.opt == 0)
+    {
+        return {x.min - y, x.max - y, 0};
+    }
+    else
+    {
+        assert(x.opt >= y);
+        return {x.min - y, x.max - y, x.opt - y};
+    }
+}
+
 bool operator==(const shape& x, const shape& y)
 {
     if(x.dynamic() and y.dynamic())
......
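Note: these operators treat opt == 0 as the "no optimum dimension" sentinel, so addition and subtraction leave it at 0 rather than shifting it. A minimal usage sketch under that assumption:

    migraphx::shape::dynamic_dimension dd{2, 6, 0}; // min=2, max=6, no optimum
    auto grown  = dd + 2;       // {4, 8, 0}: the opt sentinel stays 0
    auto shrunk = grown - 2;    // {2, 6, 0}: asserts min, max (and opt, if set) >= 2
    migraphx::shape::dynamic_dimension with_opt{2, 5, 2};
    auto shifted = 1 + with_opt; // {3, 6, 3}: a set opt shifts with min and max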
@@ -346,10 +346,10 @@ struct ref_pad
     std::string name() const { return "ref::pad"; }
     shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
-    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
+    argument compute(context&, const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        assert(output_shape.standard());
-        argument result{output_shape};
+        assert(dyn_out.computed_shape.standard());
+        argument result{dyn_out.computed_shape};
         result.visit([&](auto output) {
             using type = typename decltype(output)::value_type;
             std::fill(output.begin(), output.end(), pad_clamp<type>(op.value));
......
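Note: with a dynamic input the output shape is only known at evaluation time, so compute now takes a dyn_output whose computed_shape member holds the output shape resolved from the actual input dimensions. A sketch of the migration pattern other ref operators would follow (not the full implementation):

    argument compute(context&, const dyn_output& dyn_out, std::vector<argument> args) const
    {
        // dyn_out.computed_shape is already resolved to static dimensions here
        argument result{dyn_out.computed_shape};
        // ... fill result from args exactly as the static-shape version did ...
        return result;
    }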
@@ -4337,6 +4337,53 @@ def pad_reflect_multiaxis_test():
     return ([arg_pad, node], [x], [y])


+@onnx_test()
+def pad_attr_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[1, 1, 1, 1],
+                                 outputs=['1'])
+
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def pad_cnst_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+
+    sizes = np.array([0, 2, 0, 1])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
+    node = onnx.helper.make_node('Pad', inputs=['0', 'arg_pad'], outputs=['1'])
+
+    return ([arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_dyn_reflect_error():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+
+    node = onnx.helper.make_node('Pad',
+                                 mode='reflect',
+                                 inputs=['0'],
+                                 pads=[0, 2, 0, 1],
+                                 outputs=['1'])
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def pow_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
......
@@ -4156,6 +4156,44 @@ TEST_CASE(pad_3arg_test)
     EXPECT(p == prog);
 }

+TEST_CASE(pad_attr_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto x = mm->add_parameter(
+        "0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
+    auto ret = mm->add_instruction(migraphx::make_op("pad", {{"pads", {1, 1, 1, 1}}}), x);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
+    auto prog = parse_onnx("pad_attr_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(pad_cnst_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto x = mm->add_parameter(
+        "0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
+    mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {4}}, {0, 2, 0, 1}});
+    auto ret = mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 2, 0, 1}}}), x);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
+    auto prog = parse_onnx("pad_cnst_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(pad_dyn_reflect_error)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {2, 4, 2};
+    EXPECT(test::throws([&] { migraphx::parse_onnx("pad_dyn_reflect_error.onnx", options); }));
+}
+
 TEST_CASE(pad_reflect_test)
 {
     migraphx::program p;
......
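Note: the tests above exercise the two ways of pinning down dynamic dimensions at parse time: map_dyn_input_dims sets {min, max, opt} ranges for a named input, while default_dyn_dim_value (used by the error test) applies a single range to any dynamic dimension not otherwise specified. A minimal sketch combining both, with hypothetical file and input names:

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 8, 4};                     // fallback range
    options.map_dyn_input_dims["input0"] = {{2, 4, 2}, {2, 4, 2}}; // per-input override
    auto prog = migraphx::parse_onnx("some_dyn_model.onnx", options);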
@@ -1716,6 +1716,38 @@ TEST_CASE(nms_shape)
                  score_thres_s);
 }

+TEST_CASE(pad_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type, {2, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {2, 3, 5, 5}};
+    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
+}
+
+TEST_CASE(pad_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type, {2, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {2, 3, 6, 6}};
+    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 2, 2, 0, 0, 1, 1}}}), input);
+}
+
+TEST_CASE(pad_dyn_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 2}, {3, 3, 0}, {3, 5, 0}, {3, 5, 0}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{1, 4, 2}, {3, 3, 0}, {5, 7, 0}, {5, 7, 0}}};
+    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
+}
+
+TEST_CASE(pad_dyn_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 2}, {3, 3, 0}, {3, 5, 5}, {3, 5, 5}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{1, 4, 2}, {3, 3, 0}, {5, 7, 7}, {5, 7, 7}}};
+    expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
+}
+
 TEST_CASE(pooling_shape0)
 {
     migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
......
@@ -5099,6 +5099,26 @@ TEST_CASE(pad_test_lowest_half)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }

+TEST_CASE(pad_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape s{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}};
+    auto x = mm->add_parameter("x", s);
+    mm->add_instruction(migraphx::make_op("pad", {{"pads", {1, 1, 1, 1}}}), x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data = {1, 2, 3, 4};
+    migraphx::parameter_map params;
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 2}};
+    params["x"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector(16);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(pointwise_test)
 {
     migraphx::program p;
......
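Note: the gold vector is the row-major layout of the 2x2 input {1, 2, 3, 4} surrounded by a one-element zero border, i.e. the 4x4 output:

    // 0 0 0 0
    // 0 1 2 0
    // 0 3 4 0
    // 0 0 0 0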
@@ -174,6 +174,28 @@ TEST_CASE(dynamic_dimension_size_t_compares)
     EXPECT(static_cast<std::size_t>(2) != b);
 }

+TEST_CASE(dynamic_dimension_add_sub_fixed)
+{
+    using migraphx::shape;
+    auto a = shape::dynamic_dimension{2, 5, 2};
+    auto b = shape::dynamic_dimension{3, 6, 3};
+    EXPECT((a + 1) == b);
+    EXPECT((1 + a) == b);
+    EXPECT((b - 1) == a);
+
+    auto c = shape::dynamic_dimension{4, 7, 4};
+    EXPECT((a + 2) == c);
+    EXPECT((2 + a) == c);
+    EXPECT((c - 2) == a);
+
+    auto d = shape::dynamic_dimension{4, 8, 0};
+    auto e = shape::dynamic_dimension{2, 6, 0};
+    EXPECT((d - 2) == e);
+    EXPECT((e + 2) == d);
+    EXPECT((2 + e) == d);
+}
+
 TEST_CASE(test_shape_dynamic_errors)
 {
     using migraphx::shape;
......