Unverified commit 2433f9de, authored by Shucai Xiao, committed by GitHub

Onnx datatype parsing (#618)

* add bool type

* code backup

* code backup

* clang format

* fix build warnings

* clang format

* add the equal operator

* add the equal operator

* clang format

* remove unnecessary code

* refine unit tests

* clang format

* fix review comments and a bug

* clang format

* additional changes

* clang format

* fix cppcheck error

* add bool type in c api

* fix cppcheck error

* fix review comments

* fix cppcheck error

* fix a build error related to gcc

* fix cppcheck error

* fix cppcheck error

* added the equal operator to register list

* add parsing boolean type

* clang format

* fix bool type issue for python output

* clang format

* add support for automatic multibroadcast of the equal operator

* additional unit tests for more code coverage

* clang format

* missing an onnx file

* code cleanup

* clang format

* fix issue of int64 data type in python api

* clang format

* include more onnx backend unit tests

* add more unit tests to cover code changes

* clang format

* disable python tests for python2.7

* code backup for python half type support

* clang format

* code change to support half data type in python

* add a unit test for python half type

* clang format

* add more unit tests for more code coverage

* clang format

* add more unit tests for more code coverage

* clang format

* refine unit tests to have more code coverage

* clang format

* update python unit test

* revert to a previous version of test_gpu.py
parent 6e1f9f20
@@ -2543,6 +2543,29 @@ struct onnx_parser
         return result;
     }

+    static shape::type_t get_type(int dtype)
+    {
+        switch(dtype)
+        {
+        case 1: return shape::float_type;
+        case 2: return shape::uint8_type;
+        case 3: return shape::int8_type;
+        case 4: return shape::uint16_type;
+        case 5: return shape::int16_type;
+        case 6: return shape::int32_type;
+        case 7: return shape::int64_type;
+        case 9: return shape::bool_type;
+        case 10: return shape::half_type;
+        case 11: return shape::double_type;
+        case 12: return shape::uint32_type;
+        case 13: return shape::uint64_type;
+        default:
+        {
+            MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
+        }
+        }
+    }
+
     template <class T>
     static literal from_repeated(shape::type_t t, const T& r)
     {
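For reference (not part of the patch): the integer codes in the new static get_type helper are the onnx.TensorProto.DataType values, and making it static lets the static parse_tensor below call it too. A quick Python check of the mapping, assuming the onnx package is installed:

from onnx import TensorProto

# TensorProto.DataType codes handled by get_type; 0 (UNDEFINED), 8 (STRING)
# and 14/15 (COMPLEX64/128) fall through to the default throw.
codes = [
    (TensorProto.FLOAT, "float_type"),    # 1
    (TensorProto.UINT8, "uint8_type"),    # 2
    (TensorProto.INT8, "int8_type"),      # 3
    (TensorProto.UINT16, "uint16_type"),  # 4
    (TensorProto.INT16, "int16_type"),    # 5
    (TensorProto.INT32, "int32_type"),    # 6
    (TensorProto.INT64, "int64_type"),    # 7
    (TensorProto.BOOL, "bool_type"),      # 9
    (TensorProto.FLOAT16, "half_type"),   # 10
    (TensorProto.DOUBLE, "double_type"),  # 11
    (TensorProto.UINT32, "uint32_type"),  # 12
    (TensorProto.UINT64, "uint64_type"),  # 13
]
for code, migraphx_type in codes:
    print(int(code), migraphx_type)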
@@ -2568,7 +2591,7 @@ struct onnx_parser
         case onnx::AttributeProto::SPARSE_TENSORS:
         case onnx::AttributeProto::GRAPHS: return {};
         }
-        MIGRAPHX_THROW("Invalid attribute type");
+        MIGRAPHX_THROW("PARSE_VALUE: Invalid attribute type " + std::to_string(attr.type()));
     }

     static literal parse_tensor(const onnx::TensorProto& t)
@@ -2577,43 +2600,28 @@ struct onnx_parser
         if(t.has_raw_data())
        {
             const std::string& s = t.raw_data();
-            switch(t.data_type())
-            {
-            case onnx::TensorProto::FLOAT: return create_literal(shape::float_type, dims, s.data());
-            case onnx::TensorProto::FLOAT16:
-                return create_literal(shape::half_type, dims, s.data());
-            case onnx::TensorProto::DOUBLE:
-                return create_literal(shape::double_type, dims, s.data());
-            case onnx::TensorProto::INT64: return create_literal(shape::int64_type, dims, s.data());
-            case onnx::TensorProto::INT8:
-            case onnx::TensorProto::UINT16:
-            case onnx::TensorProto::INT16: return create_literal(shape::int16_type, dims, s.data());
-            case onnx::TensorProto::INT32:
-            case onnx::TensorProto::BOOL: return create_literal(shape::int32_type, dims, s.data());
-            case onnx::TensorProto::UINT8:
-            case onnx::TensorProto::STRING:
-            case onnx::TensorProto::UNDEFINED:
-            case onnx::TensorProto::UINT32:
-            case onnx::TensorProto::UINT64:
-            case onnx::TensorProto::COMPLEX64:
-            case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
-            }
-            MIGRAPHX_THROW("Invalid tensor type");
+            auto type = get_type(t.data_type());
+            return create_literal(type, dims, s.data());
         }
         switch(t.data_type())
         {
-        case onnx::TensorProto::INT8:
-        case onnx::TensorProto::UINT16:
+        case onnx::TensorProto::BOOL: return create_literal(shape::bool_type, dims, t.int32_data());
+        case onnx::TensorProto::INT8: return create_literal(shape::int8_type, dims, t.int32_data());
+        case onnx::TensorProto::UINT8:
+            return create_literal(shape::uint8_type, dims, t.int32_data());
         case onnx::TensorProto::INT16:
             return create_literal(shape::int16_type, dims, t.int32_data());
+        case onnx::TensorProto::UINT16:
+            return create_literal(shape::uint16_type, dims, t.int32_data());
         case onnx::TensorProto::INT32:
-        case onnx::TensorProto::BOOL:
             return create_literal(shape::int32_type, dims, t.int32_data());
+        case onnx::TensorProto::UINT32:
+            return create_literal(shape::uint32_type, dims, t.uint64_data());
         case onnx::TensorProto::INT64:
             return create_literal(shape::int64_type, dims, t.int64_data());
-        case onnx::TensorProto::DOUBLE:
-            return create_literal(shape::double_type, dims, t.double_data());
-        case onnx::TensorProto::FLOAT:
-            return create_literal(shape::float_type, dims, t.float_data());
+        case onnx::TensorProto::UINT64:
+            return create_literal(shape::uint64_type, dims, t.uint64_data());
         case onnx::TensorProto::FLOAT16:
         {
             std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
@@ -2624,15 +2632,16 @@ struct onnx_parser
                 [](uint16_t raw_val) { return *reinterpret_cast<half*>(&raw_val); });
             return create_literal(shape::half_type, dims, data_half);
         }
+        case onnx::TensorProto::DOUBLE:
+            return create_literal(shape::double_type, dims, t.double_data());
+        case onnx::TensorProto::FLOAT:
+            return create_literal(shape::float_type, dims, t.float_data());
         case onnx::TensorProto::UNDEFINED:
-        case onnx::TensorProto::UINT8:
         case onnx::TensorProto::STRING:
-        case onnx::TensorProto::UINT32:
-        case onnx::TensorProto::UINT64:
         case onnx::TensorProto::COMPLEX64:
         case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
         }
-        MIGRAPHX_THROW("Invalid tensor type");
+        MIGRAPHX_THROW("PARSE_TENSOR: Invalid tensor type");
     }

     static literal
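Illustration (not part of the patch): an ONNX initializer can carry its values either as raw bytes or as a typed repeated field, which is why parse_tensor keeps the two paths above. A small onnx/numpy sketch of the two encodings:

import numpy as np
from onnx import TensorProto, helper

vals = np.array([1.5, 2.0], dtype=np.float64)

# typed encoding: values land in t.double_data(), handled by the typed switch
t_typed = helper.make_tensor("c_typed", TensorProto.DOUBLE, vals.shape, vals.tolist())

# raw encoding: values land in t.raw_data(), decoded via get_type(t.data_type())
t_raw = helper.make_tensor("c_raw", TensorProto.DOUBLE, vals.shape, vals.tobytes(), raw=True)

print(len(t_typed.double_data), len(t_raw.raw_data))  # 2 16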
@@ -2654,29 +2663,7 @@ struct onnx_parser
     shape parse_type(const onnx::TypeProto& t, const std::vector<std::size_t>& input_dims)
     {
-        shape::type_t shape_type{};
-        switch(t.tensor_type().elem_type())
-        {
-        case onnx::TensorProto::FLOAT: shape_type = shape::float_type; break;
-        case onnx::TensorProto::INT8: shape_type = shape::int8_type; break;
-        case onnx::TensorProto::UINT16: shape_type = shape::uint16_type; break;
-        case onnx::TensorProto::INT16: shape_type = shape::int16_type; break;
-        case onnx::TensorProto::INT32: shape_type = shape::int32_type; break;
-        case onnx::TensorProto::INT64: shape_type = shape::int64_type; break;
-        case onnx::TensorProto::FLOAT16: shape_type = shape::half_type; break;
-        case onnx::TensorProto::DOUBLE: shape_type = shape::double_type; break;
-        case onnx::TensorProto::UINT32: shape_type = shape::uint32_type; break;
-        case onnx::TensorProto::UINT64: shape_type = shape::uint64_type; break;
-        case onnx::TensorProto::UINT8: shape_type = shape::uint8_type; break;
-        case onnx::TensorProto::BOOL: shape_type = shape::bool_type; break;
-        case onnx::TensorProto::STRING:
-        case onnx::TensorProto::UNDEFINED:
-        case onnx::TensorProto::COMPLEX64:
-        case onnx::TensorProto::COMPLEX128:
-            MIGRAPHX_THROW("PARSE_TYPE: unsupported type" +
-                           std::to_string(t.tensor_type().elem_type()));
-        }
+        shape::type_t shape_type = get_type(t.tensor_type().elem_type());
         if(!input_dims.empty())
         {
             return {shape_type, input_dims};
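Illustration (not part of the patch): parse_type reads the same DataType code from a graph input's TypeProto, so it can reuse get_type instead of carrying its own switch:

from onnx import TensorProto, helper

vi = helper.make_tensor_value_info('x', TensorProto.BOOL, [2, 3])
print(vi.type.tensor_type.elem_type)  # 9 -> mapped to shape::bool_type by get_type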
@@ -2708,29 +2695,6 @@ struct onnx_parser
         return {shape_type, dims};
     }

-    shape::type_t get_type(int dtype)
-    {
-        switch(dtype)
-        {
-        case 1: return shape::float_type;
-        case 2: return shape::uint8_type;
-        case 3: return shape::int8_type;
-        case 4: return shape::uint16_type;
-        case 5: return shape::int16_type;
-        case 6: return shape::int32_type;
-        case 7: return shape::int64_type;
-        case 9: return shape::bool_type;
-        case 10: return shape::half_type;
-        case 11: return shape::double_type;
-        case 12: return shape::uint32_type;
-        case 13: return shape::uint64_type;
-        default:
-        {
-            MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
-        }
-        }
-    }
-
     void check_arg_empty(const argument& arg, const std::string& msg)
     {
         if(arg.empty())
......
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include <pybind11/numpy.h>
 #include <migraphx/program.hpp>
 #include <migraphx/quantization.hpp>
 #include <migraphx/generate.hpp>
@@ -15,62 +16,42 @@
 #include <migraphx/gpu/hip.hpp>
 #endif

+using half = half_float::half;
+
 namespace py = pybind11;

-template <class F>
-struct throw_half
-{
-    F f;
-    template <class A>
-    void operator()(A a) const
-    {
-        f(a);
-    }
-
-    void operator()(migraphx::shape::as<migraphx::half>) const
-    {
-        throw std::runtime_error("Half not supported in python yet.");
-    }
-    void operator()(migraphx::tensor_view<migraphx::half>) const
-    {
-        throw std::runtime_error("Half not supported in python yet.");
-    }
-};
-
-template <class F>
-struct skip_half
-{
-    F f;
-    template <class A>
-    void operator()(A a) const
-    {
-        f(a);
-    }
-
-    void operator()(migraphx::shape::as<migraphx::half>) const {}
-    void operator()(migraphx::tensor_view<migraphx::half>) const {}
-};
+namespace pybind11 {
+namespace detail {
+
+template <>
+struct npy_format_descriptor<half>
+{
+    static std::string format()
+    {
+        // following: https://docs.python.org/3/library/struct.html#format-characters
+        return "e";
+    }
+    static constexpr auto name() { return _("half"); }
+};
+
+} // namespace detail
+} // namespace pybind11
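Illustration (not part of the patch): "e" is the struct/buffer-protocol code for a 16-bit float, and it is what numpy reports for float16 arrays, so the specialization above lets pybind11 exchange half buffers directly instead of throwing:

import numpy as np

a = np.zeros(4, dtype=np.float16)
print(memoryview(a).format)       # 'e'
print(np.dtype(np.float16).char)  # 'e'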
 template <class F>
 void visit_type(const migraphx::shape& s, F f)
 {
-    s.visit_type(throw_half<F>{f});
+    s.visit_type(f);
 }

 template <class T, class F>
 void visit(const migraphx::raw_data<T>& x, F f)
 {
-    x.visit(throw_half<F>{f});
+    x.visit(f);
 }

 template <class F>
 void visit_types(F f)
 {
-    migraphx::shape::visit_types(skip_half<F>{f});
+    migraphx::shape::visit_types(f);
 }

 template <class T>
@@ -111,7 +92,9 @@ migraphx::shape to_shape(const py::buffer_info& info)
     migraphx::shape::type_t t;
     std::size_t n = 0;
     visit_types([&](auto as) {
-        if(info.format == py::format_descriptor<decltype(as())>::format())
+        if(info.format == py::format_descriptor<decltype(as())>::format() or
+           (info.format == "l" and py::format_descriptor<decltype(as())>::format() == "q") or
+           (info.format == "L" and py::format_descriptor<decltype(as())>::format() == "Q"))
         {
             t = as.type_enum();
             n = sizeof(as());
@@ -120,7 +103,7 @@ migraphx::shape to_shape(const py::buffer_info& info)
     if(n == 0)
     {
-        MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type" + info.format);
+        MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format);
     }
     auto strides = info.strides;
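Illustration (not part of the patch): the extra "l"/"q" and "L"/"Q" comparisons in to_shape are needed because, on typical 64-bit Linux builds, numpy exports int64/uint64 buffers with the C long codes while pybind11's format_descriptor for int64_t/uint64_t uses the long long codes:

import numpy as np

print(memoryview(np.arange(3, dtype=np.int64)).format)   # 'l' (pybind11 expects 'q')
print(memoryview(np.arange(3, dtype=np.uint64)).format)  # 'L' (pybind11 expects 'Q')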
......
@@ -1436,9 +1436,9 @@ def implicit_pow_bcast_test():

 @onnx_test
 def implicit_sub_bcast_test():
-    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
-    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
-    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
+    arg0 = helper.make_tensor_value_info('0', TensorProto.UINT64, [2, 3, 4, 5])
+    arg1 = helper.make_tensor_value_info('1', TensorProto.UINT64, [4, 5])
+    arg_out = helper.make_tensor_value_info('out', TensorProto.UINT64,
                                             [2, 3, 4, 5])

     node = onnx.helper.make_node(
@@ -1802,8 +1802,8 @@ def min_test():

 @onnx_test
 def neg_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3])
-    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 3])
+    x = helper.make_tensor_value_info('0', TensorProto.INT64, [2, 3])
+    y = helper.make_tensor_value_info('1', TensorProto.INT64, [2, 3])

     node = onnx.helper.make_node('Neg', inputs=['0'], outputs=['1'])
@@ -2683,6 +2683,26 @@ def sub_scalar_test():
     return ([arg_const, node], [arg_node], [arg_out])


+@onnx_test
+def sum_int_test():
+    a = helper.make_tensor_value_info('0', TensorProto.INT16, [3])
+    b = helper.make_tensor_value_info('1', TensorProto.UINT16, [3])
+    c = helper.make_tensor_value_info('2', TensorProto.UINT32, [3])
+    y = helper.make_tensor_value_info('3', TensorProto.UINT32, [3])
+
+    cnode1 = onnx.helper.make_node('Cast', inputs=['0'], outputs=['c0'], to=12)
+    cnode2 = onnx.helper.make_node('Cast', inputs=['1'], outputs=['c1'], to=12)
+
+    node = onnx.helper.make_node(
+        'Sum',
+        inputs=['c0', 'c1', '2'],
+        outputs=['3'],
+    )
+
+    return ([cnode1, cnode2, node], [a, b, c], [y])
+
+
 @onnx_test
 def sum_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
@@ -2699,6 +2719,100 @@ def sum_test():
     return ([node], [a, b, c], [y])


+@onnx_test
+def sum_type_test():
+    valb = np.array([1, 0])
+    t_bool = helper.make_tensor(name="bool",
+                                data_type=TensorProto.BOOL,
+                                dims=valb.shape,
+                                vals=valb.astype(np.bool))
+
+    val = np.array([1, 1])
+    t_int8 = helper.make_tensor(name="int8",
+                                data_type=TensorProto.INT8,
+                                dims=val.shape,
+                                vals=val.astype(np.int8))
+
+    t_uint8 = helper.make_tensor(name="uint8",
+                                 data_type=TensorProto.UINT8,
+                                 dims=val.shape,
+                                 vals=val.astype(np.uint8))
+
+    t_uint16 = helper.make_tensor(name="uint16",
+                                  data_type=TensorProto.UINT16,
+                                  dims=val.shape,
+                                  vals=val.astype(np.uint16))
+
+    t_uint32 = helper.make_tensor(name="uint32",
+                                  data_type=TensorProto.UINT32,
+                                  dims=val.shape,
+                                  vals=val.astype(np.uint32))
+
+    t_uint64 = helper.make_tensor(name="uint64",
+                                  data_type=TensorProto.UINT64,
+                                  dims=val.shape,
+                                  vals=val.astype(np.uint64))
+
+    t_double = helper.make_tensor(name="double",
+                                  data_type=TensorProto.DOUBLE,
+                                  dims=val.shape,
+                                  vals=val.astype(np.float64))
+
+    valr = np.array([1.5, 2.0])
+    t_raw = helper.make_tensor(name="raw",
+                               data_type=TensorProto.DOUBLE,
+                               dims=valr.shape,
+                               vals=valr.tobytes(),
+                               raw=True)
+
+    n_bool = onnx.helper.make_node('Cast',
+                                   inputs=['bool'],
+                                   outputs=['o_bool'],
+                                   to=11)
+
+    n_int8 = onnx.helper.make_node('Cast',
+                                   inputs=['int8'],
+                                   outputs=['o_int8'],
+                                   to=11)
+
+    n_uint8 = onnx.helper.make_node('Cast',
+                                    inputs=['uint8'],
+                                    outputs=['o_uint8'],
+                                    to=11)
+
+    n_uint16 = onnx.helper.make_node('Cast',
+                                     inputs=['uint16'],
+                                     outputs=['o_uint16'],
+                                     to=11)
+
+    n_uint32 = onnx.helper.make_node('Cast',
+                                     inputs=['uint32'],
+                                     outputs=['o_uint32'],
+                                     to=11)
+
+    n_uint64 = onnx.helper.make_node('Cast',
+                                     inputs=['uint64'],
+                                     outputs=['o_uint64'],
+                                     to=11)
+
+    node = onnx.helper.make_node(
+        'Sum',
+        inputs=[
+            'o_bool', 'o_int8', 'o_uint8', 'o_uint16', 'o_uint32', 'o_uint64',
+            'double', 'raw'
+        ],
+        outputs=['out'],
+    )
+
+    y = helper.make_tensor_value_info('out', TensorProto.DOUBLE, [2])
+
+    return ([n_bool, n_int8, n_uint8, n_uint16, n_uint32, n_uint64,
+             node], [], [y], [
+                 t_bool, t_int8, t_uint8, t_uint16, t_uint32, t_uint64,
+                 t_double, t_raw
+             ])
+
+
 @onnx_test
 def tan_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
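For reference (not part of the patch): the numeric to= attributes on the Cast nodes above are TensorProto.DataType codes, the same ones get_type decodes on the C++ side:

from onnx import TensorProto

print(int(TensorProto.UINT32))  # 12 -> Cast(..., to=12) in sum_int_test
print(int(TensorProto.DOUBLE))  # 11 -> Cast(..., to=11) in sum_type_test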
......
[binary diff of implicit_sub_bcast_test.onnx omitted: file regenerated with UINT64 tensors]
[binary diff of neg_test.onnx omitted: file regenerated with INT64 tensors]
@@ -1102,8 +1102,8 @@ TEST_CASE(implicit_pow_bcast_test)
 TEST_CASE(implicit_sub_bcast_test)
 {
     migraphx::program p;
-    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
-    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 5}});
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::uint64_type, {2, 3, 4, 5}});
+    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::uint64_type, {4, 5}});
     auto l3 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l1);
     p.add_instruction(migraphx::op::sub{}, l0, l3);
@@ -1372,7 +1372,7 @@ TEST_CASE(no_pad_test)
 TEST_CASE(neg_test)
 {
     migraphx::program p;
-    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
+    migraphx::shape s{migraphx::shape::int64_type, {2, 3}};
     auto input = p.add_parameter("0", s);
     auto ret   = p.add_instruction(migraphx::op::neg{}, input);
     p.add_return({ret});
@@ -1401,7 +1401,7 @@ TEST_CASE(nonzero_test)
 TEST_CASE(nonzero_int_test)
 {
     migraphx::program p;
-    migraphx::shape s{migraphx::shape::int32_type, {2, 3}};
+    migraphx::shape s{migraphx::shape::int16_type, {2, 3}};
     std::vector<int> data = {1, 1, 0, 1, 0, 1};
     p.add_literal(migraphx::literal(s, data.begin(), data.end()));
@@ -1953,6 +1953,21 @@ TEST_CASE(sub_scalar_test)
     EXPECT(p == prog);
 }

+TEST_CASE(sum_int_test)
+{
+    migraphx::program p;
+    auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::int16_type, {3}});
+    auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::uint16_type, {3}});
+    auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::uint32_type, {3}});
+    auto cin0   = p.add_instruction(migraphx::op::convert{migraphx::shape::uint32_type}, input0);
+    auto cin1   = p.add_instruction(migraphx::op::convert{migraphx::shape::uint32_type}, input1);
+    auto l0     = p.add_instruction(migraphx::op::add{}, cin0, cin1);
+    p.add_instruction(migraphx::op::add{}, l0, input2);
+
+    auto prog = optimize_onnx("sum_int_test.onnx");
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(sum_test)
 {
     migraphx::program p;
@@ -1966,6 +1981,40 @@ TEST_CASE(sum_test)
     EXPECT(p == prog);
 }

+TEST_CASE(sum_type_test)
+{
+    migraphx::program p;
+    auto l_bool   = p.add_literal({migraphx::shape{migraphx::shape::bool_type, {2}}, {1, 0}});
+    auto l_int8   = p.add_literal({migraphx::shape{migraphx::shape::int8_type, {2}}, {1, 1}});
+    auto l_uint8  = p.add_literal({migraphx::shape{migraphx::shape::uint8_type, {2}}, {1, 1}});
+    auto l_uint16 = p.add_literal({migraphx::shape{migraphx::shape::uint16_type, {2}}, {1, 1}});
+    auto l_uint32 = p.add_literal({migraphx::shape{migraphx::shape::uint32_type, {2}}, {1, 1}});
+    auto l_uint64 = p.add_literal({migraphx::shape{migraphx::shape::uint64_type, {2}}, {1, 1}});
+    auto l_double = p.add_literal({migraphx::shape{migraphx::shape::double_type, {2}}, {1, 1}});
+    auto l_raw    = p.add_literal({migraphx::shape{migraphx::shape::double_type, {2}}, {1.5, 2.0}});
+
+    auto o_bool  = p.add_instruction(migraphx::op::convert{migraphx::shape::double_type}, l_bool);
+    auto o_int8  = p.add_instruction(migraphx::op::convert{migraphx::shape::double_type}, l_int8);
+    auto o_uint8 = p.add_instruction(migraphx::op::convert{migraphx::shape::double_type}, l_uint8);
+    auto o_uint16 =
+        p.add_instruction(migraphx::op::convert{migraphx::shape::double_type}, l_uint16);
+    auto o_uint32 =
+        p.add_instruction(migraphx::op::convert{migraphx::shape::double_type}, l_uint32);
+    auto o_uint64 =
+        p.add_instruction(migraphx::op::convert{migraphx::shape::double_type}, l_uint64);
+
+    auto s0 = p.add_instruction(migraphx::op::add{}, o_bool, o_int8);
+    auto s1 = p.add_instruction(migraphx::op::add{}, s0, o_uint8);
+    auto s2 = p.add_instruction(migraphx::op::add{}, s1, o_uint16);
+    auto s3 = p.add_instruction(migraphx::op::add{}, s2, o_uint32);
+    auto s4 = p.add_instruction(migraphx::op::add{}, s3, o_uint64);
+    auto s5 = p.add_instruction(migraphx::op::add{}, s4, l_double);
+    auto s6 = p.add_instruction(migraphx::op::add{}, s5, l_raw);
+    p.add_return({s6});
+
+    auto prog = migraphx::parse_onnx("sum_type_test.onnx");
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(tan_test)
 {
     migraphx::program p;
......
[binary diff of new file sum_int_test.onnx omitted]
@@ -224,7 +224,6 @@ def create_backend_test(testname=None, target_device=None):
         backend_test.exclude(r'test_gather_elements_0_cpu')
         backend_test.exclude(r'test_gather_elements_1_cpu')
         backend_test.exclude(r'test_gather_elements_negative_indices_cpu')
-        backend_test.exclude(r'test_gather_negative_indices_cpu')
         backend_test.exclude(r'test_gathernd_example_float32_cpu')
         backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
         backend_test.exclude(r'test_gathernd_example_int32_cpu')
@@ -273,9 +272,7 @@ def create_backend_test(testname=None, target_device=None):
         backend_test.exclude(r'test_thresholdedrelu_default_cpu')
         backend_test.exclude(r'test_thresholdedrelu_example_cpu')
         backend_test.exclude(r'test_Embedding_cpu')
-        backend_test.exclude(r'test_Embedding_sparse_cpu')
         backend_test.exclude(r'test_Softplus_cpu')
-        backend_test.exclude(r'test_operator_non_float_params_cpu')
         backend_test.exclude(r'test_operator_selu_cpu')
         backend_test.exclude(r'test_expand_shape_model1_cpu')
         backend_test.exclude(r'test_expand_shape_model2_cpu')
......
test_gpu.py rewritten as individual test functions: the previous module-level conv_relu/maxpool check becomes test_conv_relu(), new uint64 subtraction, int64 negation, and fp16 imagescaler checks are added, and the file exits early under Python 2.7. New contents:

import sys
if sys.version_info < (3, 0):
    sys.exit()

import migraphx
import numpy as np


def test_conv_relu():
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}

    for key, value in p.get_parameter_shapes().items():
        print("Parameter {} -> {}".format(key, value))
        params[key] = migraphx.generate_argument(value)

    r = p.run(params)
    print(r)


def test_sub_uint64():
    p = migraphx.parse_onnx("implicit_sub_bcast_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}
    shapes = p.get_parameter_shapes()
    arg0 = np.arange(120).reshape(shapes["0"].lens()).astype(np.uint64)
    arg1 = np.arange(20).reshape(shapes["1"].lens()).astype(np.uint64)
    params["0"] = migraphx.argument(arg0)
    params["1"] = migraphx.argument(arg1)

    r = p.run(params)
    print(r)


def test_neg_int64():
    p = migraphx.parse_onnx("neg_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}
    shapes = p.get_parameter_shapes()
    arg0 = np.arange(6).reshape(shapes["0"].lens()).astype(np.int64)
    params["0"] = migraphx.argument(arg0)

    r = p.run(params)
    print(r)


def test_fp16_imagescaler():
    p = migraphx.parse_onnx("imagescaler_half_test.onnx")
    print(p)
    s1 = p.get_output_shapes()[-1]
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    s2 = p.get_output_shapes()[-1]
    assert s1 == s2
    params = {}
    shapes = p.get_parameter_shapes()
    arg0 = np.random.randn(768).reshape(shapes["0"].lens()).astype(np.float16)
    params["0"] = migraphx.argument(arg0)

    r = p.run(params)[-1]
    print(r)


test_conv_relu()
test_sub_uint64()
test_neg_int64()
test_fp16_imagescaler()
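Numpy-only sketch (not part of the patch) of the data test_neg_int64 feeds and the values the compiled GPU program is expected to return:

import numpy as np

arg0 = np.arange(6).reshape(2, 3).astype(np.int64)
print(arg0)
print(-arg0)  # element-wise negation produced by the Neg operator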