"ts/webui/src/git@developer.sourcefind.cn:OpenDAS/nni.git" did not exist on "a911b856172741802083ab1ce2f92f09e9ec279f"
Unverified commit 42408349, authored by Cagri Eryilmaz, committed by GitHub

Merge branch 'develop' into unet

parents ebd0bb3a 9054ebbe
...
@@ -39,7 +39,7 @@ struct flatten
     std::string name() const { return "flatten"; }
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this}.has(1).standard();
         auto&& lens = inputs.front().lens();
         auto x =
             std::accumulate(lens.begin(), lens.begin() + axis, std::size_t{1}, std::multiplies<>{});
...
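The new `.standard()` call tightens flatten's input requirement from merely "one input" to "one input in standard layout". As a rough standalone illustration of what "standard" usually means here (packed, row-major strides; this sketch is an assumption for illustration, not the migraphx check itself):

#include <cstddef>
#include <vector>

// Packed row-major strides: the innermost axis has stride 1 and each
// outer axis strides over the full extent of everything inside it.
std::vector<std::size_t> packed_strides(const std::vector<std::size_t>& lens)
{
    std::vector<std::size_t> strides(lens.size(), 1);
    for(std::size_t i = lens.size(); i > 1; i--)
        strides[i - 2] = strides[i - 1] * lens[i - 1];
    return strides;
}

// In this sketch a view is "standard" when its strides match the packed
// row-major strides for its lengths; transposed or inner-sliced views fail.
bool is_standard(const std::vector<std::size_t>& lens,
                 const std::vector<std::size_t>& strides)
{
    return strides == packed_strides(lens);
}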
...
@@ -47,7 +47,7 @@ struct parse_generic_op : op_parser<parse_generic_op>
     bool needs_contiguous(const std::string& op_name) const
     {
-        return contains({"gather"}, op_name);
+        return contains({"flatten", "gather"}, op_name);
     }

     instruction_ref parse(const op_desc& opd,
...
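Adding "flatten" to needs_contiguous makes the ONNX parser route flatten's input through a contiguous op, which is what satisfies the stricter shape check above. A minimal sketch of the membership semantics involved, with a hypothetical stand-in for the contains() helper:

#include <algorithm>
#include <initializer_list>
#include <string>

// Hypothetical stand-in for the contains() helper: true when op_name
// matches any entry in the list.
bool contains(std::initializer_list<std::string> names, const std::string& op_name)
{
    return std::any_of(names.begin(), names.end(),
                       [&](const std::string& n) { return n == op_name; });
}

int main()
{
    // With "flatten" listed, the parser now treats it like gather and
    // inserts a contiguous op in front of its input.
    return contains({"flatten", "gather"}, "flatten") ? 0 : 1;
}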
...
@@ -131,4 +131,32 @@ TEST_CASE(non_standard_return_input)
     EXPECT(std::distance(m.begin(), m.end()) == count);
 }
 
+TEST_CASE(non_standard_flatten_op)
+{
+    migraphx::module m;
+    auto l = m.add_parameter("x", {migraphx::shape::float_type, {2, 6, 6, 6}});
+    auto t = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), l);
+    auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
+    m.add_instruction(migraphx::make_op("flatten"), c);
+    auto count = std::distance(m.begin(), m.end());
+    run_pass(m);
+    EXPECT(std::distance(m.begin(), m.end()) == count);
+}
+
+TEST_CASE(standard_flatten_op)
+{
+    migraphx::module m;
+    auto l = m.add_parameter("x", {migraphx::shape::float_type, {2, 6, 6, 6}});
+    auto t = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), l);
+    auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
+    m.add_instruction(migraphx::make_op("flatten"), c);
+    auto count = std::distance(m.begin(), m.end());
+    run_pass(m);
+    EXPECT(std::distance(m.begin(), m.end()) == (count - 1));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
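The two tests differ only in which axes are sliced, and that is what decides whether the contiguous op can be removed (run_pass presumably drives contiguous elimination). The stride arithmetic behind the expectations, as a standalone sketch:

#include <iostream>
#include <vector>

int main()
{
    // A slice keeps the parent's strides and shrinks the lengths.
    // The parent {2, 6, 6, 6} is packed row-major: strides {216, 36, 6, 1}.
    std::vector<int> sliced_strides = {216, 36, 6, 1};

    // non_standard_flatten_op: slicing axes {2, 3} yields lens {2, 6, 5, 5}.
    // Packed strides for those lens would be {150, 25, 5, 1}, which differ,
    // so the view is non-standard and the contiguous must stay (count unchanged).
    std::vector<int> packed_for_2655 = {150, 25, 5, 1};
    std::cout << (sliced_strides == packed_for_2655) << "\n"; // prints 0

    // standard_flatten_op: slicing axes {0, 1} yields lens {1, 5, 6, 6} with
    // the same strides. The addressed elements form one contiguous run of
    // 5 * 6 * 6 values (stride 36 equals the 6 * 6 inner extent), so the view
    // is effectively standard and the pass can drop the contiguous (count - 1).
    std::cout << (36 == 6 * 6) << "\n"; // prints 1
}

The Python hunks that follow add flatten_nonstd_test to exercise the new contiguous insertion and relocate the greater/less test generators to their alphabetical positions in the file.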
...
@@ -1232,98 +1232,6 @@ def equal_bool_test():
     return ([node1, node2], [x1, x2], [y])
 
 
-@onnx_test
-def greater_test():
-    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
-    x1 = helper.make_tensor("x1",
-                            data_type=TensorProto.FLOAT,
-                            dims=(2, 3),
-                            vals=ax1.astype(np.float32))
-    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
-
-    node = onnx.helper.make_node(
-        'Greater',
-        inputs=['x1', 'x2'],
-        outputs=['y'],
-    )
-
-    return ([node], [x2], [y], [x1])
-
-
-@onnx_test
-def greater_bool_test():
-    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
-    x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
-
-    node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
-    node2 = onnx.helper.make_node(
-        'Greater',
-        inputs=['bx1', 'x2'],
-        outputs=['y'],
-    )
-
-    return ([node1, node2], [x1, x2], [y])
-
-
-@onnx_test
-def less_test():
-    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
-    x1 = helper.make_tensor("x1",
-                            data_type=TensorProto.FLOAT,
-                            dims=(2, 3),
-                            vals=ax1.astype(np.float32))
-    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
-
-    node = onnx.helper.make_node(
-        'Less',
-        inputs=['x1', 'x2'],
-        outputs=['y'],
-    )
-
-    return ([node], [x2], [y], [x1])
-
-
-@onnx_test
-def less_bool_test():
-    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
-    x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
-
-    node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
-    node2 = onnx.helper.make_node(
-        'Less',
-        inputs=['bx1', 'x2'],
-        outputs=['y'],
-    )
-
-    return ([node1, node2], [x1, x2], [y])
-
-
-@onnx_test
-def lessorequal_test():
-    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
-    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
-
-    node = onnx.helper.make_node(
-        'LessOrEqual',
-        inputs=['x1', 'x2'],
-        outputs=['y'],
-    )
-
-    return ([node], [x1, x2], [y])
-
-
 @onnx_test
 def erf_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
...
@@ -1391,6 +1299,29 @@ def flatten_test():
     return ([node, node2], [x], [y, y2])
 
 
+@onnx_test
+def flatten_nonstd_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 5, 4])
+    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
+    y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60])
+
+    trans = helper.make_node(
+        'Transpose',
+        inputs=['0'],
+        outputs=['tx'],
+        perm=[0, 1, 3, 2],
+    )
+    node = onnx.helper.make_node('Flatten',
+                                 inputs=['tx'],
+                                 axis=2,
+                                 outputs=['2'])
+    node2 = onnx.helper.make_node('Flatten', inputs=['tx'], outputs=['3'])
+
+    return ([trans, node, node2], [x], [y, y2])
+
+
 @onnx_test
 def floor_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
...
@@ -1534,6 +1465,44 @@ def globalmaxpool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def greater_test():
+    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+    x1 = helper.make_tensor("x1",
+                            data_type=TensorProto.FLOAT,
+                            dims=(2, 3),
+                            vals=ax1.astype(np.float32))
+    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
+
+    node = onnx.helper.make_node(
+        'Greater',
+        inputs=['x1', 'x2'],
+        outputs=['y'],
+    )
+
+    return ([node], [x2], [y], [x1])
+
+
+@onnx_test
+def greater_bool_test():
+    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
+    x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
+
+    node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
+    node2 = onnx.helper.make_node(
+        'Greater',
+        inputs=['bx1', 'x2'],
+        outputs=['y'],
+    )
+
+    return ([node1, node2], [x1, x2], [y])
+
+
 @onnx_test
 def group_conv_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
...
@@ -2231,6 +2200,60 @@ def leaky_relu_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def less_test():
+    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+    x1 = helper.make_tensor("x1",
+                            data_type=TensorProto.FLOAT,
+                            dims=(2, 3),
+                            vals=ax1.astype(np.float32))
+    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
+
+    node = onnx.helper.make_node(
+        'Less',
+        inputs=['x1', 'x2'],
+        outputs=['y'],
+    )
+
+    return ([node], [x2], [y], [x1])
+
+
+@onnx_test
+def less_bool_test():
+    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
+    x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
+
+    node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
+    node2 = onnx.helper.make_node(
+        'Less',
+        inputs=['bx1', 'x2'],
+        outputs=['y'],
+    )
+
+    return ([node1, node2], [x1, x2], [y])
+
+
+@onnx_test
+def lessorequal_test():
+    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
+    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
+
+    node = onnx.helper.make_node(
+        'LessOrEqual',
+        inputs=['x1', 'x2'],
+        outputs=['y'],
+    )
+
+    return ([node], [x1, x2], [y])
+
+
 @onnx_test
 def log_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
...
...
@@ -1183,6 +1183,21 @@ TEST_CASE(flatten_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(flatten_nonstd_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 5, 4}});
+    auto l1 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l0);
+    auto l2 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
+    mm->add_instruction(migraphx::make_op("flatten", {{"axis", 2}}), l2);
+    auto l3 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
+    mm->add_instruction(migraphx::make_op("flatten", {{"axis", 1}}), l3);
+    auto prog = optimize_onnx("flatten_nonstd_test.onnx");
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(floor_test)
 {
     migraphx::program p;
...
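For reference, the expected output dims in flatten_nonstd_test follow directly from flatten's shape rule in the first hunk: axes before `axis` collapse into one dimension and the remaining axes into another. A standalone sketch mirroring that accumulate logic (an illustration, not the migraphx implementation itself):

#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Flatten's output shape: {prod(lens[0..axis)), prod(lens[axis..end))},
// mirroring the std::accumulate calls in flatten::normalize_compute_shape.
std::vector<std::size_t> flatten_dims(const std::vector<std::size_t>& lens, std::size_t axis)
{
    auto outer = std::accumulate(lens.begin(), lens.begin() + axis, std::size_t{1},
                                 std::multiplies<>{});
    auto inner = std::accumulate(lens.begin() + axis, lens.end(), std::size_t{1},
                                 std::multiplies<>{});
    return {outer, inner};
}

int main()
{
    // Transposing input {2, 3, 5, 4} with perm {0, 1, 3, 2} gives lens {2, 3, 4, 5}.
    auto a = flatten_dims({2, 3, 4, 5}, 2); // {6, 20}, matching output '2'
    auto b = flatten_dims({2, 3, 4, 5}, 1); // {2, 60}, matching output '3' (ONNX default axis = 1)
    std::cout << a[0] << "x" << a[1] << ", " << b[0] << "x" << b[1] << "\n";
}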