Unverified Commit 658cdab0 authored by Shucai Xiao's avatar Shucai Xiao Committed by GitHub
Browse files

Onnx 1.8 support (#798)



* add support for axes inputs for squeeze/unsqueeze/reduce_sum

* clang format

* fix build problems

* backup code changes

* clang format

* fix a bug in parsing quantizelinear operator

* clang format

* fix a cppcheck error

* disable different versions of unit tests for different onnx version

* clang format

* upgrade onnx to 1.8

* update onnx to 1.8.1

* disable two more real models

* clang format

* fix review comments

* fix the function of assign axes in parsing the squeeze operator

* add unit tests and fix a bug

* clang format

* fix review comments

* clang format

* fix a build error

* backup code changes

* clang format

* add more unit tests and add parsing opset version

* clang format

* fix cppcheck error

* add installation of the onnx package

* resolve no protobuf compiler

* fix cppcheck error

* add unit tests for more code coverage

* clang format

* try a comment in jenkins build

* include the install onnx line

* code backup

* reorder the dependencies installed

* refine dockerfile

* fix review comments

* clang format
Co-authored-by: default avatarmvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 17485202
......@@ -50,8 +50,12 @@ RUN update-locale LANG=en_US.UTF-8
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
# Install rbuild
RUN pip3 install https://github.com/RadeonOpenCompute/rbuild/archive/master.tar.gz
# Install dependencies
ADD dev-requirements.txt /dev-requirements.txt
ADD requirements.txt /requirements.txt
COPY ./tools/install_prereqs.sh /
RUN /install_prereqs.sh /usr/local / && rm /install_prereqs.sh
# Install yapf
RUN pip3 install yapf==0.28.0
......@@ -60,19 +64,11 @@ RUN pip3 install yapf==0.28.0
ADD doc/requirements.txt /doc-requirements.txt
RUN pip3 install -r /doc-requirements.txt
RUN pip3 install onnx==1.7.0 numpy==1.18.5 typing==3.7.4 pytest==6.0.1
# Download real models to run onnx unit tests
ENV ONNX_HOME=$HOME
COPY ./tools/download_models.sh /
RUN /download_models.sh && rm /download_models.sh
# Install dependencies
ADD dev-requirements.txt /dev-requirements.txt
ADD requirements.txt /requirements.txt
COPY ./tools/install_prereqs.sh /
RUN /install_prereqs.sh /usr/local / && rm /install_prereqs.sh
# Install latest ccache version
RUN cget -p $PREFIX install facebook/zstd@v1.4.5 -X subdir -DCMAKE_DIR=build/cmake
RUN cget -p $PREFIX install ccache@v4.1
......
......@@ -63,6 +63,7 @@ struct onnx_parser
std::size_t default_dim_value = 1;
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims;
bool skip_unknown_operators = false;
int64_t opset_version = 13;
std::unordered_map<std::string, op_func> ops;
......@@ -71,6 +72,8 @@ struct onnx_parser
void parse_undefined(module* mod, const std::string& name);
static int64_t get_opset_version(const onnx::ModelProto& model);
void parse_from(std::istream& is, std::string name = "");
void parse_from(const void* data, std::size_t size);
void parse_graph(module* mod, const onnx::GraphProto& graph);
......
......@@ -29,6 +29,14 @@ static onnx_parser::attribute_map get_attributes(const onnx::NodeProto& node)
static literal
create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const char* data)
{
// empty input
auto elem_num =
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
if(elem_num == 0)
{
return {};
}
// in case of scalar constants in onnx file, use dims=1 to fill initializer data
if(dims.empty())
return literal{{shape_type}, data};
......@@ -38,6 +46,15 @@ create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const
template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
static literal create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, T data)
{
// empty input
auto elem_num =
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
if(elem_num == 0)
{
return {};
}
// scalar input
if(dims.empty())
return literal{{shape_type}, data.begin(), data.end()};
return literal{{shape_type, dims}, data.begin(), data.end()};
......@@ -210,6 +227,9 @@ void onnx_parser::parse_from(std::istream& is, std::string name)
onnx::ModelProto model;
if(model.ParseFromIstream(&is))
{
auto version = get_opset_version(model);
opset_version = (version == -1) ? opset_version : version;
if(model.has_graph())
{
this->parse_graph(mm, model.graph());
......@@ -227,6 +247,9 @@ void onnx_parser::parse_from(const void* data, std::size_t size)
onnx::ModelProto model;
if(model.ParseFromArray(data, size))
{
auto version = get_opset_version(model);
opset_version = (version == -1) ? opset_version : version;
if(model.has_graph())
{
this->parse_graph(mm, model.graph());
......@@ -238,6 +261,21 @@ void onnx_parser::parse_from(const void* data, std::size_t size)
}
}
int64_t onnx_parser::get_opset_version(const onnx::ModelProto& model)
{
    // Scan every opset_import entry of the model and report the highest
    // declared version.  Returns -1 when no entry carries a version, which
    // callers treat as "keep the parser's default opset".
    int64_t max_version = -1;
    for(const auto& entry : model.opset_import())
    {
        if(not entry.has_version())
            continue;
        if(entry.version() > max_version)
            max_version = entry.version();
    }
    return max_version;
}
void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
{
for(auto&& f : graph.initializer())
......
......@@ -30,7 +30,6 @@ struct parse_generic_op : op_parser<parse_generic_op>
{"Identity", "identity"},
{"LeakyRelu", "leaky_relu"},
{"Log", "log"},
{"LogSoftmax", "logsoftmax"},
{"LRN", "lrn"},
{"Neg", "neg"},
{"Reciprocal", "recip"},
......@@ -40,18 +39,15 @@ struct parse_generic_op : op_parser<parse_generic_op>
{"Sign", "sign"},
{"Sin", "sin"},
{"Sinh", "sinh"},
{"Softmax", "softmax"},
{"Sqrt", "sqrt"},
{"Squeeze", "squeeze"},
{"Tan", "tan"},
{"Tanh", "tanh"},
{"Not", "not"},
{"Unsqueeze", "unsqueeze"}};
{"Not", "not"}};
}
bool needs_contiguous(const std::string& op_name) const
{
return contains({"gather", "squeeze", "unsqueeze"}, op_name);
return contains({"gather"}, op_name);
}
instruction_ref parse(const op_desc& opd,
......
......@@ -16,8 +16,13 @@ struct parse_lessorequal : op_parser<parse_lessorequal>
const onnx_parser::node_info& info,
std::vector<instruction_ref> args) const
{
auto int_res = info.add_instruction(make_op("greater"), args[0], args[1]);
return info.add_instruction(make_op("not"), int_res);
auto in_res = info.add_broadcastable_binary_op("greater", args[0], args[1]);
if(in_res->get_shape().type() != shape::bool_type)
{
in_res = info.add_instruction(make_op("convert", {{"target_type", shape::bool_type}}),
in_res);
}
return info.add_instruction(make_op("not"), in_res);
}
};
......
......@@ -72,6 +72,21 @@ struct parse_quantizelinear : op_parser<parse_quantizelinear>
make_op("convert", {{"target_type", shape::int32_type}}), add_zero_point);
add_zero_point = info.add_broadcastable_binary_op("add", add_zero_point, zero_point);
}
auto s = add_zero_point->get_shape();
const auto& lens = s.lens();
std::vector<int64_t> out_lens(lens.begin(), lens.end());
if(min_arg->get_shape() != s)
{
min_arg = info.add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}),
min_arg);
}
if(max_arg->get_shape() != s)
{
max_arg = info.add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}),
max_arg);
}
auto saturated = info.add_instruction(make_op("clip"), add_zero_point, min_arg, max_arg);
return info.add_instruction(make_op("convert", {{"target_type", quant_type}}), saturated);
}
......
......@@ -2,6 +2,7 @@
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -12,18 +13,44 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
onnx_parser::node_info info,
std::vector<instruction_ref> args)
{
std::size_t n_dim = args.front()->get_shape().lens().size();
// default to reduce over all dimensions
std::vector<int64_t> axes(n_dim);
std::iota(axes.begin(), axes.end(), 0);
if(contains(info.attributes, "axes"))
std::vector<int64_t> axes;
if(args.size() == 2)
{
auto arg_axes = args.at(1)->eval();
check_arg_empty(arg_axes, "PARSE_" + op_name + ": cannot handle variable axes!");
axes.clear();
arg_axes.visit([&](auto s) { axes.assign(s.begin(), s.end()); });
}
else if(contains(info.attributes, "axes"))
{
axes.clear();
auto&& attr_axes = info.attributes["axes"].ints();
axes = std::vector<int64_t>(attr_axes.begin(), attr_axes.end());
}
bool noop_with_empty_axes = false;
if(contains(info.attributes, "noop_with_empty_axes"))
{
noop_with_empty_axes = static_cast<bool>(
parser.parse_value(info.attributes.at("noop_with_empty_axes")).at<int>());
}
// empty axes behavior
if(axes.empty())
{
if(noop_with_empty_axes)
{
return args.at(0);
}
else
{
std::size_t n_dim = args.front()->get_shape().lens().size();
axes.resize(n_dim);
std::iota(axes.begin(), axes.end(), 0);
}
}
int keep_dims = 1;
if(contains(info.attributes, "keepdims"))
{
......@@ -32,11 +59,11 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
if(keep_dims == 1)
{
return info.add_instruction(make_op(op_name, {{"axes", axes}}), args);
return info.add_instruction(make_op(op_name, {{"axes", axes}}), args.front());
}
else
{
auto ins = info.add_instruction(make_op(op_name, {{"axes", axes}}), args);
auto ins = info.add_instruction(make_op(op_name, {{"axes", axes}}), args.front());
return info.add_instruction(make_op("squeeze", {{"axes", axes}}), ins);
}
}
......
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_softmax : op_parser<parse_softmax>
{
    std::vector<op_desc> operators() const
    {
        return {{"Softmax", "softmax"}, {"LogSoftmax", "logsoftmax"}};
    }

    instruction_ref parse(const op_desc& opd,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          const std::vector<instruction_ref>& args) const
    {
        // Opset 13 changed the default axis of Softmax/LogSoftmax from 1
        // to -1, so pick the default based on the model's opset version.
        int64_t axis_val = (parser.opset_version < 13) ? 1 : -1;

        // An explicit axis attribute always overrides the opset default.
        if(contains(info.attributes, "axis"))
        {
            axis_val = parser.parse_value(info.attributes.at("axis")).at<int>();
        }

        return info.add_instruction(make_op(opd.op_name, {{"axis", axis_val}}), args);
    }
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_squeeze : op_parser<parse_squeeze>
{
    std::vector<op_desc> operators() const
    {
        return {{"Squeeze", "squeeze"}, {"Unsqueeze", "unsqueeze"}};
    }

    // Store the given axes into the operation's "axes" attribute and
    // hand back the updated operation.
    operation assign_axes(operation& op, const std::vector<int64_t>& axes) const
    {
        auto val    = op.to_value();
        val["axes"] = axes;
        op.from_value(val);
        return op;
    }

    instruction_ref parse(const op_desc& opd,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          std::vector<instruction_ref> args) const
    {
        auto op = parser.load(opd.op_name, info);
        // Opset 13 moved axes from an attribute to a second input; that
        // input must be a compile-time constant for us to parse it.
        if(args.size() == 2)
        {
            auto arg_axes = args.at(1)->eval();
            check_arg_empty(arg_axes, "PARSE_" + opd.op_name + ": cannot handle variable axes!");
            std::vector<int64_t> axes;
            arg_axes.visit([&](auto s) { axes.assign(s.begin(), s.end()); });
            op = assign_axes(op, axes);
        }
        return info.add_instruction(op, info.make_contiguous(args.front()));
    }
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -1595,9 +1595,12 @@ def if_literal_test():
onnx.TensorProto.FLOAT, [5])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT, [5])
empty_out = onnx.helper.make_tensor_value_info('empty_out',
onnx.TensorProto.FLOAT, [])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
z = np.array([]).astype(np.float32)
then_const_node = onnx.helper.make_node(
'Constant',
......@@ -1611,11 +1614,17 @@ def if_literal_test():
outputs=['else_out'],
value=onnx.numpy_helper.from_array(y))
then_body = onnx.helper.make_graph([then_const_node], 'then_body', [],
[then_out])
empty_const_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['empty_out'],
value=onnx.numpy_helper.from_array(z))
else_body = onnx.helper.make_graph([else_const_node], 'else_body', [],
[else_out])
then_body = onnx.helper.make_graph([then_const_node, empty_const_node],
'then_body', [], [then_out])
else_body = onnx.helper.make_graph([else_const_node, empty_const_node],
'else_body', [], [else_out])
cond_input = onnx.helper.make_tensor_value_info('cond',
onnx.TensorProto.BOOL, [])
......@@ -2940,6 +2949,44 @@ def reducesum_test():
return ([node], [x], [y])
@onnx_test
def reducesum_empty_axes_test():
    # ReduceSum (opset 13): axes arrive as an empty constant input instead
    # of an attribute; with noop_with_empty_axes false the parser should
    # reduce over every dimension.
    axes_vals = np.array([], dtype=np.int64)
    axes_init = helper.make_tensor(name="axes",
                                   data_type=TensorProto.INT64,
                                   dims=axes_vals.shape,
                                   vals=axes_vals.astype(np.int64))
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [3, 4, 5, 6])
    out_info = helper.make_tensor_value_info('y', TensorProto.FLOAT,
                                             [3, 4, 1, 6])
    red_node = onnx.helper.make_node('ReduceSum',
                                     inputs=['x', 'axes'],
                                     outputs=['y'],
                                     keepdims=0,
                                     noop_with_empty_axes=False)

    return ([red_node], [in_info], [out_info], [axes_init])
@onnx_test
def reducesum_noop_test():
    # ReduceSum (opset 13) with an empty axes input and
    # noop_with_empty_axes true: the op should act as the identity.
    axes_vals = np.array([], dtype=np.int64)
    axes_init = helper.make_tensor(name="axes",
                                   data_type=TensorProto.INT64,
                                   dims=axes_vals.shape,
                                   vals=axes_vals.astype(np.int64))
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [3, 4, 5, 6])
    out_info = helper.make_tensor_value_info('y', TensorProto.FLOAT,
                                             [3, 4, 1, 6])
    red_node = onnx.helper.make_node('ReduceSum',
                                     inputs=['x', 'axes'],
                                     outputs=['y'],
                                     keepdims=0,
                                     noop_with_empty_axes=True)

    return ([red_node], [in_info], [out_info], [axes_init])
@onnx_test
def reducesum_keepdims_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
......@@ -3470,6 +3517,40 @@ def sqrt_test():
return ([node], [x], [y])
@onnx_test
def squeeze_axes_input_test():
    # Squeeze (opset 13): axes [1, 3] supplied as a constant initializer
    # input rather than an attribute.
    axes_vals = np.array([1, 3], dtype=np.int64)
    axes_init = helper.make_tensor(name="axes",
                                   data_type=TensorProto.INT64,
                                   dims=axes_vals.shape,
                                   vals=axes_vals.astype(np.int64))
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [3, 1, 5, 1])
    out_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])
    sq_node = onnx.helper.make_node('Squeeze',
                                    inputs=['x', 'axes'],
                                    outputs=['y'])

    return ([sq_node], [in_info], [out_info], [axes_init])
@onnx_test
def squeeze_empty_axes_test():
    # Squeeze (opset 13) with an empty axes input: every size-1 dimension
    # should be squeezed away.
    axes_vals = np.array([], dtype=np.int64)
    axes_init = helper.make_tensor(name="axes",
                                   data_type=TensorProto.INT64,
                                   dims=axes_vals.shape,
                                   vals=axes_vals.astype(np.int64))
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [3, 1, 5, 1])
    out_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])
    sq_node = onnx.helper.make_node('Squeeze',
                                    inputs=['x', 'axes'],
                                    outputs=['y'])

    return ([sq_node], [in_info], [out_info], [axes_init])
@onnx_test
def squeeze_unsqueeze_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
......
......@@ -1420,11 +1420,13 @@ TEST_CASE(if_literal_test)
auto* then_mod = p.create_module("If_1_if");
std::vector<float> data1 = {1, 2, 3, 4, 5};
auto l1 = then_mod->add_literal(migraphx::literal(s, data1));
then_mod->add_literal({});
then_mod->add_return({l1});
auto* else_mod = p.create_module("If_1_else");
std::vector<float> data2 = {5, 4, 3, 2, 1};
auto l2 = else_mod->add_literal(migraphx::literal(s, data2));
else_mod->add_literal({});
else_mod->add_return({l2});
auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
......@@ -1749,7 +1751,9 @@ TEST_CASE(lessorequal_test)
auto input1 = mm->add_parameter("x1", migraphx::shape{migraphx::shape::float_type, {3}});
auto input2 = mm->add_parameter("x2", migraphx::shape{migraphx::shape::float_type, {3}});
auto temp = mm->add_instruction(migraphx::make_op("greater"), input1, input2);
auto le = mm->add_instruction(migraphx::make_op("not"), temp);
auto bt = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), temp);
auto le = mm->add_instruction(migraphx::make_op("not"), bt);
mm->add_return({le});
......@@ -1833,7 +1837,7 @@ TEST_CASE(logsoftmax_nonstd_input_test)
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {6, 9}});
auto l1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 0}}, {"ends", {4, 4}}}), l0);
auto l2 = mm->add_instruction(migraphx::make_op("logsoftmax", {{"axis", 1}}), l1);
auto l2 = mm->add_instruction(migraphx::make_op("logsoftmax", {{"axis", -1}}), l1);
mm->add_return({l2});
auto prog = migraphx::parse_onnx("logsoftmax_nonstd_input_test.onnx");
......@@ -2317,6 +2321,10 @@ TEST_CASE(quantizelinear_test)
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
round);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
min_val =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), min_val);
max_val =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), max_val);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_val, max_val);
mm->add_instruction(
migraphx::make_op("convert",
......@@ -2356,6 +2364,10 @@ migraphx::program make_quantizelinear_axis_prog()
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
round);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
min_val = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", {1, 1, 5, 1}}}), min_val);
max_val = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", {1, 1, 5, 1}}}), max_val);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_val, max_val);
mm->add_instruction(
migraphx::make_op("convert",
......@@ -2541,6 +2553,33 @@ TEST_CASE(reducesum_test)
EXPECT(p == prog);
}
// ReduceSum parsed from a model whose axes input is an empty constant:
// the parser defaults to reducing over all dimensions (keepdims=0 in the
// generator, hence the trailing squeeze).
TEST_CASE(reducesum_empty_axes_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // the empty axes initializer from the onnx file shows up as an empty literal
    mm->add_literal({});
    auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
    // reduce over every axis, then squeeze the kept size-1 dims away
    auto l1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), x);
    auto r = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), l1);
    mm->add_return({r});
    auto prog = migraphx::parse_onnx("reducesum_empty_axes_test.onnx");
    EXPECT(p == prog);
}
// ReduceSum with empty axes and noop_with_empty_axes=true: the op is a
// no-op, so the parsed program just returns the input parameter.
TEST_CASE(reducesum_noop_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // the empty axes initializer still produces an (unused) empty literal
    mm->add_literal({});
    auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
    mm->add_return({x});
    auto prog = migraphx::parse_onnx("reducesum_noop_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(reducesum_multiaxis_test)
{
migraphx::program p;
......@@ -2961,7 +3000,7 @@ TEST_CASE(softmax_nonstd_input_test)
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {6, 8}});
auto l1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 0}}, {"ends", {4, 4}}}), l0);
auto l2 = mm->add_instruction(migraphx::make_op("softmax", {{"axis", 1}}), l1);
auto l2 = mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), l1);
mm->add_return({l2});
auto prog = migraphx::parse_onnx("softmax_nonstd_input_test.onnx");
......@@ -3045,6 +3084,34 @@ TEST_CASE(squeeze_unsqueeze_test)
EXPECT(p == prog);
}
// Squeeze whose axes come from a constant second input (opset 13 form):
// the axes initializer becomes a literal and the parser folds the axes
// into the squeeze operation's attribute.
TEST_CASE(squeeze_axes_input_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // literal for the axes initializer {1, 3}
    mm->add_literal(migraphx::literal({migraphx::shape::int64_type, {2}}, {1, 3}));
    auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 1, 5, 1}});
    auto l1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {1, 3}}}), l0);
    mm->add_return({l1});
    auto prog = migraphx::parse_onnx("squeeze_axes_input_test.onnx");
    EXPECT(p == prog);
}
// Squeeze with an empty axes input: no axes attribute is assigned, so the
// squeeze op removes all size-1 dimensions.
TEST_CASE(squeeze_empty_axes_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // empty axes initializer parses into an empty literal
    mm->add_literal({});
    auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 1, 5, 1}});
    auto l1 = mm->add_instruction(migraphx::make_op("squeeze"), l0);
    mm->add_return({l1});
    auto prog = migraphx::parse_onnx("squeeze_empty_axes_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(sub_bcast_test)
{
migraphx::program p;
......
squeeze_axes_input_test:r

x
axesy"Squeezesqueeze_axes_input_test*:BaxesZ
x




b
y


B
\ No newline at end of file
......@@ -10,6 +10,7 @@ import onnx
import onnx.backend.test
import numpy as np
from onnx_migraphx.backend import MIGraphXBackend as c2
from packaging import version
pytest_plugins = 'onnx.backend.test.report',
......@@ -40,6 +41,36 @@ class MIGraphXBackendTest(onnx.backend.test.BackendTest):
err_msg=prog_string)
def disabled_tests_onnx_1_7_0(backend_test):
    # Node tests excluded when running against onnx <= 1.7.0; the
    # softmax/logsoftmax default-axis semantics differ in later opsets.
    excluded = (
        r'test_logsoftmax_axis_0_cpu',
        r'test_logsoftmax_axis_1_cpu',
        r'test_logsoftmax_default_axis_cpu',
        r'test_softmax_axis_0_cpu',
        r'test_softmax_axis_1_cpu',
        r'test_softmax_default_axis_cpu',
    )
    for pattern in excluded:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_8_1(backend_test):
    # Node tests excluded when running against onnx >= 1.8; these cases
    # (sequence if, reduce_sum/unsqueeze with axes inputs) exercise
    # behavior not supported by the backend yet.
    excluded = (
        r'test_if_seq_cpu',
        r'test_reduce_sum_default_axes_keepdims_example_cpu',
        r'test_reduce_sum_default_axes_keepdims_random_cpu',
        r'test_reduce_sum_do_not_keepdims_example_cpu',
        r'test_reduce_sum_do_not_keepdims_random_cpu',
        r'test_reduce_sum_empty_axes_input_noop_example_cpu',
        r'test_reduce_sum_empty_axes_input_noop_random_cpu',
        r'test_reduce_sum_keepdims_example_cpu',
        r'test_reduce_sum_keepdims_random_cpu',
        r'test_reduce_sum_negative_axes_keepdims_example_cpu',
        r'test_reduce_sum_negative_axes_keepdims_random_cpu',
        r'test_unsqueeze_axis_0_cpu',
        r'test_unsqueeze_axis_1_cpu',
        r'test_unsqueeze_axis_2_cpu',
        r'test_unsqueeze_negative_axes_cpu',
        r'test_unsqueeze_three_axes_cpu',
        r'test_unsqueeze_two_axes_cpu',
        r'test_unsqueeze_unsorted_axes_cpu',
    )
    for pattern in excluded:
        backend_test.exclude(pattern)
def create_backend_test(testname=None, target_device=None):
if target_device is not None:
c2.set_device(target_device)
......@@ -203,9 +234,6 @@ def create_backend_test(testname=None, target_device=None):
)
backend_test.exclude(
r'test_argmin_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(r'test_logsoftmax_axis_0_cpu')
backend_test.exclude(r'test_logsoftmax_axis_1_cpu')
backend_test.exclude(r'test_logsoftmax_default_axis_cpu')
backend_test.exclude(r'test_lrn_cpu')
backend_test.exclude(r'test_lrn_default_cpu')
backend_test.exclude(r'test_maxpool_2d_dilations_cpu')
......@@ -213,9 +241,6 @@ def create_backend_test(testname=None, target_device=None):
r'test_maxpool_with_argmax_2d_precomputed_pads_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_strides_cpu')
backend_test.exclude(r'test_softmax_axis_0_cpu')
backend_test.exclude(r'test_softmax_axis_1_cpu')
backend_test.exclude(r'test_softmax_default_axis_cpu')
# error cases
backend_test.exclude(r'test_constant_pad_cpu')
......@@ -228,6 +253,10 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_depthtospace_example_cpu')
backend_test.exclude(r'test_expand_dim_changed_cpu')
backend_test.exclude(r'test_expand_dim_unchanged_cpu')
backend_test.exclude(r'test_expand_shape_model1_cpu')
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
backend_test.exclude(r'test_gathernd_example_float32_cpu')
backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
backend_test.exclude(r'test_gathernd_example_int32_cpu')
......@@ -238,18 +267,13 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_hardsigmoid_cpu')
backend_test.exclude(r'test_hardsigmoid_default_cpu')
backend_test.exclude(r'test_hardsigmoid_example_cpu')
backend_test.exclude(r'test_less_equal_bcast_cpu')
backend_test.exclude(r'test_less_equal_bcast_expanded_cpu')
backend_test.exclude(r'test_less_equal_cpu')
backend_test.exclude(r'test_less_equal_expanded_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_mean_example_cpu')
backend_test.exclude(r'test_mean_one_input_cpu')
backend_test.exclude(r'test_mean_two_inputs_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
backend_test.exclude(r'test_not_2d_cpu')
backend_test.exclude(r'test_not_3d_cpu')
backend_test.exclude(r'test_not_4d_cpu')
# all reduce ops have dynamic axes inputs
backend_test.exclude(r'test_size_cpu')
backend_test.exclude(r'test_size_example_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_*')
......@@ -262,18 +286,19 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_thresholdedrelu_example_cpu')
backend_test.exclude(r'test_Embedding_cpu')
backend_test.exclude(r'test_Softplus_cpu')
backend_test.exclude(r'test_operator_selu_cpu')
backend_test.exclude(r'test_expand_shape_model1_cpu')
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
# These three tests failed because of bugs in fuse_ops related to conv
# to be investigated later
# real model tests
backend_test.exclude(r'test_inception_v1_cpu')
backend_test.exclude(r'test_resnet50_cpu')
backend_test.exclude(r'test_squeezenet_cpu')
# additional cases disabled for a specific onnx version
if version.parse(onnx.__version__) <= version.parse("1.7.0"):
disabled_tests_onnx_1_7_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.8.0"):
disabled_tests_onnx_1_8_1(backend_test)
# import all test cases at global scope to make
# them visible to python.unittest.
......
......@@ -3401,7 +3401,7 @@ TEST_CASE(softmax_test)
migraphx::shape a_shape{migraphx::shape::float_type, {5, 3, 4, 2}};
auto al = mm->add_literal(migraphx::literal{a_shape, a});
mm->add_instruction(migraphx::make_op("softmax"), al);
mm->add_instruction(migraphx::make_op("softmax", {{"axis", 1}}), al);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector(120);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment