Commit 34150e61 authored by Khalique

add decorators

parents 8ded3a31 62cb3441
@@ -206,6 +206,16 @@ struct onnx_parser
return out_lens;
}
instruction_ref make_contiguous(instruction_ref ins)
{
if(ins->get_shape().standard())
{
return ins;
}
return prog.add_instruction(op::contiguous{}, ins);
}
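The helper above copies only when it has to: arguments whose shape is already standard pass through untouched, anything else gets an explicit op::contiguous. A rough numpy analogy of the same idea (illustrative only; np.ascontiguousarray stands in for op::contiguous):

import numpy as np

a = np.arange(6).reshape(2, 3).T   # transposed view: not in standard (row-major) layout
print(a.flags['C_CONTIGUOUS'])     # False
b = np.ascontiguousarray(a)        # explicit copy into standard layout
print(b.flags['C_CONTIGUOUS'])     # True: same values, standard strides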
template <class T>
instruction_ref add_broadcastable_binary_op(instruction_ref arg0, instruction_ref arg1, T x)
{
@@ -441,12 +451,7 @@ struct onnx_parser
s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
}
-if(!args[0]->get_shape().standard())
-{
-args[0] = prog.add_instruction(op::contiguous{}, args[0]);
-}
-return prog.add_instruction(op, args[0]);
+return prog.add_instruction(op, make_contiguous(args[0]));
}
instruction_ref
@@ -494,23 +499,41 @@ struct onnx_parser
{
axis = parse_value(attributes.at("axis")).at<int>();
}
op::gather op{axis};
-return prog.add_instruction(op, std::move(args));
+return prog.add_instruction(op, make_contiguous(args[0]), make_contiguous(args[1]));
}
instruction_ref
parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
op::slice op;
std::vector<size_t> dims = args[0]->get_shape().lens();
size_t num_dims = dims.size();
if(contains(attributes, "axes"))
{
literal s = parse_value(attributes.at("axes"));
s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
}
else
{
op.axes = std::vector<int64_t>(num_dims);
std::iota(op.axes.begin(), op.axes.end(), 0);
}
if(contains(attributes, "ends"))
{
literal s = parse_value(attributes.at("ends"));
s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
for(size_t i = 0; i < num_dims; i++)
{
if(static_cast<size_t>(op.ends[i]) > dims[i])
{
op.ends[i] = dims[i];
}
}
}
if(contains(attributes, "starts"))
{
literal s = parse_value(attributes.at("starts"));
s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
...
@@ -74,7 +74,8 @@ void quantize(program& prog, const std::vector<std::string>& ins_names)
// if the input is a convert operator, uses its input
// as its current input
instruction_ref input_fp16{};
-if(input->name() == "convert")
+if(input->name() == "convert" and
+   input->inputs().front()->get_shape().type() == shape::half_type)
{
input_fp16 = input->inputs().front();
}
...
@@ -8,51 +8,6 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
-template <class... Ts>
-rocblas_status generic_rocblas_gemm_ex(Ts&&... xs)
-{
-return rocblas_gemm_ex(std::forward<Ts>(xs)...);
-}
-template <class... Ts>
-rocblas_status generic_rocblas_batched_gemm_ex(Ts&&... xs)
-{
-return rocblas_gemm_strided_batched_ex(std::forward<Ts>(xs)...);
-}
-template <class T>
-struct compute_rocblas_type
-{
-using type = T;
-};
-template <class T>
-struct compute_rocblas_type<const T>
-{
-using type = const typename compute_rocblas_type<T>::type;
-};
-template <>
-struct compute_rocblas_type<half>
-{
-using type = rocblas_half;
-};
-template <class T>
-using rb_type = typename compute_rocblas_type<T>::type;
-template <class T>
-rb_type<T> to_rocblas_type(T x)
-{
-return reinterpret_cast<const rb_type<T>&>(x);
-}
-template <class T>
-rb_type<T>* to_rocblas_type(T* x)
-{
-return reinterpret_cast<rb_type<T>*>(x);
-}
shape rocblas_quant_gemm::compute_shape(const std::vector<shape>& inputs) const
{
std::vector<shape> in_shapes(inputs);
@@ -102,13 +57,13 @@ argument rocblas_quant_gemm::compute(context& ctx,
auto a_lens = args[0].get_shape().lens();
auto b_lens = args[1].get_shape().lens();
output_shape.visit_type([&](auto as) {
-auto alpha_r = to_rocblas_type(as(op.alpha));
-auto beta_r = to_rocblas_type(as(beta));
+auto alpha_r = as(op.alpha);
+auto beta_r = as(beta);
auto out_lens = output_shape.lens();
rocblas_int m = out_lens[dim_0];
rocblas_int n = out_lens[dim_1];
rocblas_int k = args[0].get_shape().lens()[dim_1];
-auto to_pointer = [&](auto&& arg) { return to_rocblas_type(as.from(arg.data())); };
+auto to_pointer = [&](auto&& arg) { return as.from(arg.data()); };
assert(k % 4 == 0);
auto num_matrices = std::accumulate(
@@ -119,36 +74,36 @@ argument rocblas_quant_gemm::compute(context& ctx,
// column-major format. When doing a C = A * B, we actually do
// C^T = (B^T) * (A^T). That is the reason we input args[1] as
// A and args[0] as B in calling the rocblas_gemm.
-generic_rocblas_gemm_ex(ctx.get_stream().get_rocblas(),
+rocblas_gemm_ex(ctx.get_stream().get_rocblas(),
transb ? rocblas_operation_transpose : rocblas_operation_none,
transa ? rocblas_operation_transpose : rocblas_operation_none,
n,
m,
k,
&alpha_r,
to_pointer(args.at(1)),
rocblas_datatype_i8_r,
ldb,
to_pointer(args.at(0)),
rocblas_datatype_i8_r,
lda,
&beta_r,
to_pointer(args[2]),
rocblas_datatype_i32_r,
ldc,
is_3inputs ? to_pointer(args[3]) : to_pointer(args[2]),
rocblas_datatype_i32_r,
ldc,
rocblas_datatype_i32_r,
rocblas_gemm_algo_standard,
0,
0,
nullptr,
nullptr);
}
else
{
-generic_rocblas_batched_gemm_ex(
+rocblas_gemm_strided_batched_ex(
ctx.get_stream().get_rocblas(),
transb ? rocblas_operation_transpose : rocblas_operation_none,
transa ? rocblas_operation_transpose : rocblas_operation_none,
...
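The argument swap the comment above justifies (args[1] as A, args[0] as B) rests on a transpose identity that a short numpy check makes concrete (illustrative sketch, independent of rocBLAS): a row-major C = A * B has exactly the memory layout a column-major library produces for C^T = B^T * A^T.

import numpy as np

A = np.arange(6).reshape(2, 3).astype(np.float32)
B = np.arange(12).reshape(3, 4).astype(np.float32)
C = A @ B
assert np.array_equal(C.T, B.T @ A.T)  # the identity the argument swap relies on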
@@ -26,7 +26,6 @@ struct tf_parser
{
using attribute_map = std::unordered_map<std::string, tensorflow::AttrValue>;
using node_map = std::map<std::string, tensorflow::NodeDef>;
-// using input_node_map = std::unordered_map<std::string, std::unordered_set<std::string>>;
using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;
node_map nodes;
@@ -149,9 +148,26 @@ struct tf_parser
return axes;
}
std::vector<int64_t> get_axes_from_mask(const size_t num_axes, const uint32_t mask)
{
uint32_t bitwise_compare = 1;
std::vector<int64_t> axes;
for(size_t i = 0; i < num_axes; i++)
{
// the LSB corresponds to axis 0 when determining which axes the mask selects
if(((mask >> i) & bitwise_compare) == 1)
axes.push_back(1);
else
axes.push_back(0);
}
return axes;
}
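What get_axes_from_mask computes, sketched in Python for brevity (axes_from_mask is a hypothetical stand-in with the same bit logic): bit i of the mask, with the LSB as axis 0, becomes the 0/1 flag for axis i.

def axes_from_mask(num_axes, mask):
    return [(mask >> i) & 1 for i in range(num_axes)]

print(axes_from_mask(4, 0b0101))  # [1, 0, 1, 0]: axes 0 and 2 are flagged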
tf_parser()
{
add_generic_op("All", op::identity{});
add_generic_op("Identity", op::identity{}); add_generic_op("Identity", op::identity{});
add_generic_op("LessEqual", op::identity{});
add_generic_op("Relu", op::relu{}); add_generic_op("Relu", op::relu{});
add_generic_op("Relu6", op::clip{6.0, 0.0}); add_generic_op("Relu6", op::clip{6.0, 0.0});
add_generic_op("Rsqrt", op::rsqrt{}); add_generic_op("Rsqrt", op::rsqrt{});
...@@ -166,6 +182,7 @@ struct tf_parser ...@@ -166,6 +182,7 @@ struct tf_parser
add_mem_op("AvgPool", &tf_parser::parse_pooling); add_mem_op("AvgPool", &tf_parser::parse_pooling);
add_mem_op("BatchMatMul", &tf_parser::parse_matmul, false); add_mem_op("BatchMatMul", &tf_parser::parse_matmul, false);
add_mem_op("BatchMatMulV2", &tf_parser::parse_matmul, false);
add_mem_op("BiasAdd", &tf_parser::parse_biasadd); add_mem_op("BiasAdd", &tf_parser::parse_biasadd);
add_mem_op("Cast", &tf_parser::parse_cast, false); add_mem_op("Cast", &tf_parser::parse_cast, false);
add_mem_op("ConcatV2", &tf_parser::parse_concat, false); add_mem_op("ConcatV2", &tf_parser::parse_concat, false);
...@@ -177,14 +194,15 @@ struct tf_parser ...@@ -177,14 +194,15 @@ struct tf_parser
add_mem_op("GatherV2", &tf_parser::parse_gather, false); add_mem_op("GatherV2", &tf_parser::parse_gather, false);
add_mem_op("MatMul", &tf_parser::parse_matmul, false); add_mem_op("MatMul", &tf_parser::parse_matmul, false);
add_mem_op("MaxPool", &tf_parser::parse_pooling); add_mem_op("MaxPool", &tf_parser::parse_pooling);
add_mem_op("Mean", &tf_parser::parse_mean); add_mem_op("Mean", &tf_parser::parse_mean, false);
add_mem_op("OneHot", &tf_parser::parse_onehot, false);
add_mem_op("Pack", &tf_parser::parse_pack, false); add_mem_op("Pack", &tf_parser::parse_pack, false);
add_mem_op("Pad", &tf_parser::parse_pad); add_mem_op("Pad", &tf_parser::parse_pad);
add_mem_op("Reshape", &tf_parser::parse_reshape, false); add_mem_op("Reshape", &tf_parser::parse_reshape, false);
add_mem_op("Slice", &tf_parser::parse_slice, false); add_mem_op("Slice", &tf_parser::parse_slice, false);
add_mem_op("Softmax", &tf_parser::parse_softmax<op::softmax>); add_mem_op("Softmax", &tf_parser::parse_softmax<op::softmax>, false);
add_mem_op("Squeeze", &tf_parser::parse_squeeze, false); add_mem_op("Squeeze", &tf_parser::parse_squeeze, false);
add_mem_op("StridedSlice", &tf_parser::parse_stridedslice); add_mem_op("StridedSlice", &tf_parser::parse_stridedslice, false);
add_mem_op("Transpose", &tf_parser::parse_transpose, false); add_mem_op("Transpose", &tf_parser::parse_transpose, false);
} }
...@@ -547,7 +565,7 @@ struct tf_parser ...@@ -547,7 +565,7 @@ struct tf_parser
} }
if(contains(attributes, "transpose_b")) if(contains(attributes, "transpose_b"))
{ {
-transb = attributes.at("transpose_a").b();
+transb = attributes.at("transpose_b").b();
}
if(contains(attributes, "adj_x"))
@@ -574,8 +592,7 @@ struct tf_parser
parse_mean(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
bool keep_dims = attributes.at("keep_dims").b();
-auto lens = args[0]->get_shape().lens();
-auto axes = parse_axes(args[1]->eval().get<int32_t>().to_vector<int64_t>(), lens.size());
+auto axes = args[1]->eval().get<int32_t>().to_vector<int64_t>();
if(keep_dims)
{
@@ -588,6 +605,32 @@ struct tf_parser
}
}
instruction_ref
parse_onehot(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
size_t depth = static_cast<size_t>(args[1]->eval().at<int32_t>());
int64_t axis = -1;
float on_value = args[2]->eval().at<float>();
float off_value = args[3]->eval().at<float>();
std::vector<float> depth_input(depth * depth, off_value);
for(size_t i = 0; i < depth; i++)
{
depth_input[depth * i + i] = on_value;
}
if(contains(attributes, "axis"))
axis = attributes.at("axis").i();
if(axis == -1)
{
shape s{shape::float_type, {depth, depth}};
auto l0 = prog.add_literal({s, depth_input});
return prog.add_instruction(op::gather{0}, {l0, args[0]});
}
MIGRAPHX_THROW("MIGraphX does not support axis != -1");
}
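parse_onehot builds a depth x depth table whose rows are one-hot vectors and then gathers rows by index; a numpy sketch of the same construction (np.take standing in for op::gather on axis 0):

import numpy as np

depth, on_value, off_value = 4, 1.0, 0.0
table = np.full((depth, depth), off_value, dtype=np.float32)
np.fill_diagonal(table, on_value)       # on_value along the diagonal
indices = np.array([2, 0, 3])
print(np.take(table, indices, axis=0))  # row i is the one-hot vector for index i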
instruction_ref parse_pack(const std::string&,
const attribute_map& attributes,
std::vector<instruction_ref> args)
@@ -799,21 +842,50 @@ struct tf_parser
std::vector<instruction_ref> args)
{
op::slice op;
auto starts = args[1]->eval().get<int32_t>().to_vector();
auto ends = args[2]->eval().get<int32_t>().to_vector();
-size_t num_axes = args[0]->get_shape().lens().size();
+auto l0 = args[0];
size_t num_axes = l0->get_shape().lens().size();
std::vector<size_t> axes = l0->get_shape().lens();
op.starts = std::vector<int64_t>(starts.begin(), starts.end());
op.ends = std::vector<int64_t>(ends.begin(), ends.end());
op.axes = std::vector<int64_t>(num_axes);
std::iota(op.axes.begin(), op.axes.end(), 0);
uint32_t begin_mask = 0;
uint32_t end_mask = 0;
uint32_t shrink_axis_mask = 0;
uint32_t bitwise_compare = 1;
std::vector<int64_t> squeeze_axes;
if(contains(attributes, "begin_mask"))
begin_mask = static_cast<uint32_t>(attributes.at("begin_mask").i());
if(contains(attributes, "end_mask"))
end_mask = static_cast<uint32_t>(attributes.at("end_mask").i());
if(contains(attributes, "shrink_axis_mask")) if(contains(attributes, "shrink_axis_mask"))
shrink_axis_mask = static_cast<uint32_t>(attributes.at("shrink_axis_mask").i()); shrink_axis_mask = static_cast<uint32_t>(attributes.at("shrink_axis_mask").i());
std::vector<int64_t> begin_axes = get_axes_from_mask(num_axes, begin_mask);
std::vector<int64_t> end_axes = get_axes_from_mask(num_axes, end_mask);
for(size_t i = 0; i < num_axes; i++)
{
if(begin_axes.at(i) == 1)
{
op.starts.at(i) = 0;
}
if(end_axes.at(i) == 1)
{
op.ends.at(i) = axes.at(i);
}
}
auto l1 = prog.add_instruction(op, l0);
if(shrink_axis_mask == 0)
return l1;
for(size_t i = 0; i < num_axes; i++)
{
// the LSB corresponds to axis 0 when determining which axes to squeeze
@@ -821,8 +893,7 @@ struct tf_parser
squeeze_axes.push_back(i);
}
-auto l0 = prog.add_instruction(op, make_contiguous(args[0]));
-return to_nhwc(prog.add_instruction(op::squeeze{squeeze_axes}, l0));
+return prog.add_instruction(op::squeeze{squeeze_axes}, l1);
}
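The begin_mask/end_mask handling above follows TensorFlow's StridedSlice convention: a set bit means the supplied start/end for that axis is ignored and the full range is used instead. A small Python illustration of that rule (assumed semantics, not MIGraphX code):

data_shape = [2, 3, 4]
starts, ends = [1, 1, 1], [2, 2, 2]
begin_mask, end_mask = 0b010, 0b100
for i in range(len(data_shape)):
    if (begin_mask >> i) & 1:
        starts[i] = 0               # bit set: slice from the beginning
    if (end_mask >> i) & 1:
        ends[i] = data_shape[i]     # bit set: slice to the end of the axis
print(starts, ends)  # [1, 0, 1] [2, 2, 4]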
instruction_ref
@@ -862,10 +933,16 @@ struct tf_parser
if(instructions.count(name) == 0)
{
auto&& node = nodes.at(name);
// assert ops ignored
if(node.op() == "Assert" or contains(name, "Assert"))
return;
std::vector<instruction_ref> args;
for(auto&& input : node.input())
{
// control dependencies (signified by ^ before the name) are ignored
if(contains(input, "^"))
continue;
if(nodes.count(input) > 0)
{
auto&& iname = get_name(nodes.at(input));
...
@@ -4,6 +4,13 @@ from onnx import helper
from onnx import numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
def onnx_test(op_test):
    def run_test():
        model_def = helper.make_model(op_test(), producer_name=op_test.__name__)
        onnx.save(model_def, '{}.onnx'.format(op_test.__name__))
    return run_test
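Because the decorator replaces each graph builder with its run_test wrapper, calling a decorated function now wraps the returned graph in a model and saves it as '<function name>.onnx'; for example (hypothetical driver, using acos_test as defined below):

if __name__ == '__main__':
    acos_test()  # builds the model from the returned graph and writes 'acos_test.onnx'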
@onnx_test
def acos_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -14,16 +21,14 @@ def acos_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_acos',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='acos-example')
-    onnx.save(model_def, 'onnx_acos.onnx')
+@onnx_test
def add_bcast_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
@@ -37,16 +42,14 @@ def add_bcast_test():
        outputs=['2']
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-add_bcast',
        [x,y],
        [z]
    )
-    model_def = helper.make_model(graph_def, producer_name='add_bcast-example')
-    onnx.save(model_def, 'add_bcast_test.onnx')
+@onnx_test
def add_fp16_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1])
@@ -58,7 +61,7 @@ def add_fp16_test():
        outputs=['2'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-add-fp16',
        [x,y],
@@ -71,6 +74,7 @@ def add_fp16_test():
    model_def = helper.make_model(graph_def, producer_name=('add-fp16-example'))
    onnx.save(model_def, 'add_fp16_test.onnx')
@onnx_test
def add_scalar_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [])
@@ -82,7 +86,7 @@ def add_scalar_test():
        outputs=['2']
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-add-scalar',
        [x,y],
@@ -90,9 +94,7 @@ def add_scalar_test():
        initializer=[helper.make_tensor('1', TensorProto.FLOAT, [], [1])]
    )
-    model_def = helper.make_model(graph_def, producer_name='add_scalar-example')
-    onnx.save(model_def, 'add_scalar_test.onnx')
+@onnx_test
def argmax_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
@@ -106,16 +108,14 @@ def argmax_test():
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_argmax',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='argmax-example')
-    onnx.save(model_def, 'argmax_test.onnx')
+@onnx_test
def argmin_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
@@ -128,16 +128,14 @@ def argmin_test():
        keepdims = 0
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_argmin',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='argmin-example')
-    onnx.save(model_def, 'argmin_test.onnx')
+@onnx_test
def asin_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -148,16 +146,14 @@ def asin_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_asin',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='asin-example')
-    onnx.save(model_def, 'asin_test.onnx')
+@onnx_test
def atan_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -168,16 +164,14 @@ def atan_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_atan',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='atan-example')
-    onnx.save(model_def, 'atan_test.onnx')
+@onnx_test
def cast_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -189,16 +183,14 @@ def cast_test():
        to = 1
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_cast',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='cast-example')
-    onnx.save(model_def, 'cast_test.onnx')
+@onnx_test
def clip_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
@@ -212,16 +204,14 @@ def clip_test():
        min=0.0
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-model',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='clip-example')
-    onnx.save(model_def, 'clip_test.onnx')
+@onnx_test
def concat_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 4, 3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7, 4, 3])
@@ -234,16 +224,14 @@ def concat_test():
        outputs=['2'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-concat',
        [x,y],
        [z]
    )
-    model_def = helper.make_model(graph_def, producer_name='concat-example')
-    onnx.save(model_def, 'concat_test.onnx')
+@onnx_test
def constant_test():
    x = np.array([0, 1, 2])
    y = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
@@ -260,16 +248,14 @@ def constant_test():
        ),
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-constant',
        [],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name=('constant-example'))
-    onnx.save(model_def, 'constant_test.onnx')
+@onnx_test
def constant_fill_test():
    value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
@@ -283,16 +269,14 @@ def constant_fill_test():
        input_as_shape = 0,
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'constant_fill',
        [],
        [value],
    )
-    model_def = helper.make_model(graph_def, producer_name='constant-fill-example')
-    onnx.save(model_def, 'constant_fill_test.onnx')
+@onnx_test
def constant_fill_input_as_shape_test():
    np_shape = np.array([2, 3])
    shape = helper.make_tensor_value_info('shape', TensorProto.INT32, [2])
@@ -321,16 +305,14 @@ def constant_fill_input_as_shape_test():
        input_as_shape = 1,
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [const_shape_node, node],
        'constant_fill',
        [],
        [value],
    )
-    model_def = helper.make_model(graph_def, producer_name='constant-fill-example')
-    onnx.save(model_def, 'constant_fill_input_as_shape_test.onnx')
+@onnx_test
def constant_scalar_test():
    x = np.array([1])
    y = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1])
@@ -347,16 +329,14 @@ def constant_scalar_test():
        ),
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-constant',
        [],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name=('constant-scalar-example'))
-    onnx.save(model_def, 'constant_scalar_test.onnx')
+@onnx_test
def const_of_shape_empty_input_test():
    tensor_val = onnx.helper.make_tensor(
        'value',
@@ -385,16 +365,14 @@ def const_of_shape_empty_input_test():
        value = tensor_val,
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [shape_const, node],
        'constant_of_shape',
        [],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-    onnx.save(model_def, 'const_of_shape_empty_input_test.onnx')
+@onnx_test
def const_of_shape_float_test():
    tensor_val = onnx.helper.make_tensor(
        'value',
@@ -423,16 +401,14 @@ def const_of_shape_float_test():
        value = tensor_val
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [shape_const, node],
        'constant_of_shape',
        [],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-    onnx.save(model_def, 'const_of_shape_float_test.onnx')
+@onnx_test
def const_of_shape_int64_test():
    tensor_val = onnx.helper.make_tensor(
        'value',
@@ -460,16 +436,14 @@ def const_of_shape_int64_test():
        value = tensor_val
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [shape_const, node],
        'constant_of_shape',
        [],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-    onnx.save(model_def, 'const_of_shape_int64_test.onnx')
+@onnx_test
def const_of_shape_no_value_attr_test():
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(
@@ -492,16 +466,14 @@ def const_of_shape_no_value_attr_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [shape_const, node],
        'constant_of_shape',
        [],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-    onnx.save(model_def, 'const_of_shape_no_value_attr_test.onnx')
+@onnx_test
def conv_autopad_fail_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
@@ -517,16 +489,14 @@ def conv_autopad_fail_test():
        pads = [0,0,1,1,0,0,1,1]
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_conv',
        [x, y],
        [out],
    )
-    model_def = helper.make_model(graph_def, producer_name='conv-example')
-    onnx.save(model_def, 'conv_autopad_fail_test.onnx')
+@onnx_test
def conv_bias_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
@@ -541,16 +511,14 @@ def conv_bias_test():
        strides = [1, 1]
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_conv',
        [x, y, z],
        [out],
    )
-    model_def = helper.make_model(graph_def, producer_name='conv-example')
-    onnx.save(model_def, 'conv_bias_test.onnx')
+@onnx_test
def conv_bn_relu_maxpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
@@ -592,16 +560,14 @@ def conv_bn_relu_maxpool_test():
        kernel_shape=[2,2]
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node0, node1, node2, node3],
        'test_conv_bn_relu',
        [x, y, z, m, n, k, l],
        [out],
    )
-    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
-    onnx.save(model_def, 'conv_bn_relu_maxpool_test.onnx')
+@onnx_test
def conv_relu_maxpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
@@ -632,16 +598,14 @@ def conv_relu_maxpool_test():
        kernel_shape=[2,2]
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node1, node2, node3],
        'test_conv_relu',
        [x, y, z],
        [out],
    )
-    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
-    onnx.save(model_def, 'conv_relu_maxpool_test.onnx')
+@onnx_test
def conv_relu_maxpool_x2_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 3, 5, 5])
@@ -698,16 +662,14 @@ def conv_relu_maxpool_x2_test():
        kernel_shape=[2,2]
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node1, node2, node3, node4, node5, node6],
        'test_conv_relu2',
        [x, y, z, m, n],
        [out],
    )
-    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
-    onnx.save(model_def, 'conv_relu_maxpool_x2_test.onnx')
+@onnx_test
def cos_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -718,16 +680,14 @@ def cos_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_cos',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='cos-example')
-    onnx.save(model_def, 'cos_test.onnx')
+@onnx_test
def cosh_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
@@ -738,16 +698,14 @@ def cosh_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_cosh',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='cosh-example')
-    onnx.save(model_def, 'cosh_test.onnx')
+@onnx_test
def dropout_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 2, 2])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 2, 2])
@@ -758,16 +716,14 @@ def dropout_test():
        outputs=['1'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-dropout',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='dropout-example')
-    onnx.save(model_def, 'dropout_test.onnx')
+@onnx_test
def elu_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
@@ -779,16 +735,14 @@ def elu_test():
        alpha=0.01
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-model',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='elu-example')
-    onnx.save(model_def, 'elu_test.onnx')
+@onnx_test
def erf_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 15])
@@ -799,16 +753,14 @@ def erf_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_erf',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='erf-example')
-    onnx.save(model_def, 'erf_test.onnx')
+@onnx_test
def exp_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -819,16 +771,14 @@ def exp_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_exp',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='exp-example')
-    onnx.save(model_def, 'exp_test.onnx')
+@onnx_test
def expand_test():
    shape_val = np.array([2, 3, 4, 5]).astype(np.int64)
    shape_ts = helper.make_tensor(
@@ -852,16 +802,14 @@ def expand_test():
        outputs=['y']
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [shape_const, node],
        'expand',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='expand')
-    onnx.save(model_def, 'expand_test.onnx')
+@onnx_test
def flatten_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
@@ -880,16 +828,14 @@ def flatten_test():
        outputs=['3']
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node,node2],
        'test-flatten',
        [x],
        [y,y2]
    )
-    model_def = helper.make_model(graph_def, producer_name=('flatten-example'))
-    onnx.save(model_def, 'flatten_test.onnx')
+@onnx_test
def gather_test():
    x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
    i = helper.make_tensor_value_info('indices', TensorProto.INT32, [2, 3, 4, 5])
@@ -902,16 +848,14 @@ def gather_test():
        axis=1,
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_gather',
        [x, i],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='gather-example')
-    onnx.save(model_def, 'gather_test.onnx')
+@onnx_test
def gemm_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 7])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [11, 5])
@@ -928,16 +872,14 @@ def gemm_test():
        transB=1
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-gemm',
        [x, y, z],
        [a]
    )
-    model_def = helper.make_model(graph_def, producer_name=('gemm-example'))
-    onnx.save(model_def, 'gemm_test.onnx')
+@onnx_test
def gemm_ex_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
@@ -953,16 +895,14 @@ def gemm_ex_test():
        transA = 1
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_gemm_ex',
        [m1, m2, m3],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='gemm-example')
-    onnx.save(model_def, 'gemm_ex_test.onnx')
+@onnx_test
def gemm_ex_brcst_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
@@ -978,16 +918,14 @@ def gemm_ex_brcst_test():
        transA = 1
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_gemm_ex',
        [m1, m2, m3],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='gemm-example')
-    onnx.save(model_def, 'gemm_ex_brcst_test.onnx')
+@onnx_test
def globalavgpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1,3,16,16])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1,3,1,1])
@@ -998,16 +936,14 @@ def globalavgpool_test():
        outputs=['1'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-globalavgpool',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='globalavgpool-example')
-    onnx.save(model_def, 'globalavgpool_test.onnx')
+@onnx_test
def globalmaxpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1,3,16,16])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1,3,1,1])
@@ -1018,16 +954,14 @@ def globalmaxpool_test():
        outputs=['1'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-globalmaxpool',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='globalmaxpool-example')
-    onnx.save(model_def, 'globalmaxpool_test.onnx')
+@onnx_test
def group_conv_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 1, 3, 3])
@@ -1040,16 +974,14 @@ def group_conv_test():
        outputs=['2'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-group_conv',
        [x,y],
        [z]
    )
-    model_def = helper.make_model(graph_def, producer_name='group_conv-example')
-    onnx.save(model_def, 'group_conv_test.onnx')
+@onnx_test
def imagescaler_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1,3,16,16])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1,3,16,16])
@@ -1062,16 +994,14 @@ def imagescaler_test():
        scale=0.5
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-imagescaler',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='imagescaler-example')
-    onnx.save(model_def, 'imagescaler_test.onnx')
+@onnx_test
def implicit_add_bcast_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4, 1])
@@ -1083,16 +1013,14 @@ def implicit_add_bcast_test():
        outputs=['2'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-multi_bcast',
        [x,y],
        [z]
    )
-    model_def = helper.make_model(graph_def, producer_name='implicit_bcast-example')
-    onnx.save(model_def, 'implicit_add_bcast_test.onnx')
+@onnx_test
def implicit_pow_bcast_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4, 1])
@@ -1104,16 +1032,14 @@ def implicit_pow_bcast_test():
        outputs=['out'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'pow_test',
        [arg0, arg1],
        [arg_out],
    )
-    model_def = helper.make_model(graph_def, producer_name='pow2')
-    onnx.save(model_def, 'implicit_pow_bcast_test.onnx')
+@onnx_test
def implicit_sub_bcast_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
@@ -1125,16 +1051,14 @@ def implicit_sub_bcast_test():
        outputs=['out'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'subtraction2',
        [arg0, arg1],
        [arg_out],
    )
-    model_def = helper.make_model(graph_def, producer_name='add2')
-    onnx.save(model_def, 'implicit_sub_bcast_test.onnx')
+@onnx_test
def leaky_relu_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
@@ -1146,16 +1070,14 @@ def leaky_relu_test():
        alpha=0.01
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-model',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='leaky_relu-example')
-    onnx.save(model_def, 'leaky_relu_test.onnx')
+@onnx_test
def log_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
@@ -1166,16 +1088,14 @@ def log_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_log',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='log-example')
-    onnx.save(model_def, 'log_test.onnx')
+@onnx_test
def logsoftmax_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
@@ -1187,16 +1107,14 @@ def logsoftmax_test():
        axis = 1
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_logsoftmax',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='logsoftmax-example')
-    onnx.save(model_def, 'logsoftmax_test.onnx')
+@onnx_test
def lrn_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 28, 24, 24])
@@ -1211,16 +1129,14 @@ def lrn_test():
        outputs=['1']
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-lrn',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name=('lrn-example'))
-    onnx.save(model_def, 'lrn_test.onnx')
+@onnx_test
def matmul_bmbm_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 2, 1, 7, 8])
@@ -1232,16 +1148,14 @@ def matmul_bmbm_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_matmul',
        [m1, m2],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-    onnx.save(model_def, 'matmul_bmbm_test.onnx')
+@onnx_test
def matmul_bmv_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
@@ -1253,16 +1167,14 @@ def matmul_bmv_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_matmul',
        [m1, m2],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-    onnx.save(model_def, 'matmul_bmv_test.onnx')
+@onnx_test
def matmul_mv_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [6, 7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
@@ -1274,16 +1186,14 @@ def matmul_mv_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_matmul',
        [m1, m2],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-    onnx.save(model_def, 'matmul_mv_test.onnx')
+@onnx_test
def matmul_vbm_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 7, 8])
@@ -1295,16 +1205,14 @@ def matmul_vbm_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_matmul',
        [m1, m2],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-    onnx.save(model_def, 'matmul_vbm_test.onnx')
+@onnx_test
def matmul_vm_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, 8])
@@ -1316,16 +1224,14 @@ def matmul_vm_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_matmul',
        [m1, m2],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-    onnx.save(model_def, 'matmul_vm_test.onnx')
+@onnx_test
def matmul_vv_test():
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
@@ -1337,16 +1243,14 @@ def matmul_vv_test():
        outputs=['y'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_matmul',
        [m1, m2],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-    onnx.save(model_def, 'matmul_vv_test.onnx')
+@onnx_test
def max_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
@@ -1359,16 +1263,14 @@ def max_test():
        outputs=['3'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-dropout',
        [a, b, c],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='max-example')
-    onnx.save(model_def, 'max_test.onnx')
+@onnx_test
def min_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
@@ -1381,16 +1283,14 @@ def min_test():
        outputs=['3'],
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-dropout',
        [a, b, c],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='min-example')
-    onnx.save(model_def, 'min_test.onnx')
+@onnx_test
def no_pad_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 2])
@@ -1403,16 +1303,14 @@ def no_pad_test():
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-no-pad',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='no-pad-example')
-    onnx.save(model_def, 'no_pad_test.onnx')
+@onnx_test
def pad_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 4])
@@ -1425,16 +1323,14 @@ def pad_test():
    )
-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-pad',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='pad-example')
-    onnx.save(model_def, 'pad_test.onnx')
+@onnx_test
def pow_test(): def pow_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5]) arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 3, 4, 5]) arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 3, 4, 5])
...@@ -1447,16 +1343,14 @@ def pow_test(): ...@@ -1447,16 +1343,14 @@ def pow_test():
) )
graph_def = helper.make_graph( return helper.make_graph(
[node], [node],
'pow_test', 'pow_test',
[arg0, arg1], [arg0, arg1],
[arg_out], [arg_out],
) )
model_def = helper.make_model(graph_def, producer_name='pow2') @onnx_test
onnx.save(model_def, 'pow_test.onnx')
def reducemean_test(): def reducemean_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
...@@ -1470,16 +1364,14 @@ def reducemean_test(): ...@@ -1470,16 +1364,14 @@ def reducemean_test():
keepdims = 0 keepdims = 0
) )
graph_def = helper.make_graph( return helper.make_graph(
[node], [node],
'test_reducemean', 'test_reducemean',
[x], [x],
[y], [y],
) )
model_def = helper.make_model(graph_def, producer_name='reducemean-example') @onnx_test
onnx.save(model_def, 'reducemean_test.onnx')
def reducemean_keepdims_test(): def reducemean_keepdims_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
...@@ -1493,16 +1385,14 @@ def reducemean_keepdims_test(): ...@@ -1493,16 +1385,14 @@ def reducemean_keepdims_test():
keepdims = 1 keepdims = 1
) )
graph_def = helper.make_graph( return helper.make_graph(
[node], [node],
'test_reducemean', 'test_reducemean',
[x], [x],
[y], [y],
) )
model_def = helper.make_model(graph_def, producer_name='reducemean-example') @onnx_test
onnx.save(model_def, 'reducemean_keepdims_test.onnx')
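Note: the two reducemean tests differ only in keepdims; the axes attributes are hidden by the fold above, but the declared output shapes pin down the behaviour. A quick numpy illustration of the keepdims semantics (numpy used purely for illustration; the axes below are inferred from the declared shapes, not read from the hidden lines):

import numpy as np

x = np.ones((3, 4, 5, 6), dtype=np.float32)
print(np.mean(x, axis=(2, 3), keepdims=False).shape)  # (3, 4), matches reducemean_test's y
print(np.mean(x, axis=2, keepdims=True).shape)        # (3, 4, 1, 6), matches reducemean_keepdims_test's y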
+@onnx_test
def reducesum_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])

@@ -1516,16 +1406,14 @@ def reducesum_test():
        keepdims = 0
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_reducesum',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='reducesum-example')
-    onnx.save(model_def, 'reducesum_test.onnx')

+@onnx_test
def reducesum_multiaxis_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])

@@ -1539,16 +1427,14 @@ def reducesum_multiaxis_test():
        keepdims = 0
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_reducesum',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='reducesum-example')
-    onnx.save(model_def, 'reducesum_multiaxis_test.onnx')

+@onnx_test
def reducesum_keepdims_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])

@@ -1562,16 +1448,14 @@ def reducesum_keepdims_test():
        keepdims = 1
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_reducesum',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='reducesum-example')
-    onnx.save(model_def, 'reducesum_keepdims_test.onnx')

+@onnx_test
def reshape_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
    x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])

@@ -1592,7 +1476,7 @@ def reshape_test():
        outputs=['3']
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node,node2],
        'test-reshape',
        [x, x_shape],

@@ -1600,9 +1484,7 @@ def reshape_test():
        initializer=[helper.make_tensor('1', TensorProto.INT64, [2], [3, 8])]
    )

-    model_def = helper.make_model(graph_def, producer_name=('reshape-example'))
-    onnx.save(model_def, 'reshape_test.onnx')
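Note: reshape_non_standard_test below reshapes a transposed tensor, one whose memory layout is no longer standard; this is exactly the case the parser now routes through op::contiguous (see the C++ transpose_gather test further down). The numpy analogue, for illustration only:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
t = x.transpose(0, 2, 1)            # shape (2, 4, 3), strides are non-standard
print(t.flags['C_CONTIGUOUS'])      # False: reshape needs a standardized copy
c = np.ascontiguousarray(t)         # plays the role of op::contiguous
print(c.reshape(4, 3, 2).shape)     # (4, 3, 2)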
+@onnx_test
def reshape_non_standard_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
    trans_x = helper.make_tensor_value_info('trans_x', TensorProto.FLOAT, [2, 4, 3])

@@ -1622,16 +1504,14 @@ def reshape_non_standard_test():
        shape=[4, 3, 2]
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [trans, res],
        'reshape-ns',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='reshape')
-    onnx.save(model_def, 'reshape_non_standard_test.onnx')

+@onnx_test
def shape_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.INT64, [4])

@@ -1642,16 +1522,14 @@ def shape_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_shape',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='shape-example')
-    onnx.save(model_def, 'shape_test.onnx')

+@onnx_test
def shape_gather_test():
    values = np.array([1])
    value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])

@@ -1685,16 +1563,14 @@ def shape_gather_test():
        axis=0,
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node_const, node_shape, node_gather],
        'shape_gather',
        [x],
        [z],
    )
-    model_def = helper.make_model(graph_def, producer_name='shape-gather-example')
-    onnx.save(model_def, 'shape_gather_test.onnx')

+@onnx_test
def sign_test():
    x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])
    y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [10, 5])

@@ -1705,16 +1581,14 @@ def sign_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_sign',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='sign-example')
-    onnx.save(model_def, 'sign_test.onnx')

+@onnx_test
def sin_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])

@@ -1725,16 +1599,14 @@ def sin_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_sin',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='sin-example')
-    onnx.save(model_def, 'sin_test.onnx')

+@onnx_test
def sinh_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])

@@ -1745,16 +1617,14 @@ def sinh_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_sinh',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='sinh-example')
-    onnx.save(model_def, 'sinh_test.onnx')

+@onnx_test
def slice_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2])

@@ -1768,16 +1638,14 @@ def slice_test():
        outputs=['1']
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-slice',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name=('slice-example'))
-    onnx.save(model_def, 'slice_test.onnx')

+@onnx_test
def softmax_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3])

@@ -1788,16 +1656,14 @@ def softmax_test():
        outputs=['1']
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-softmax',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name=('softmax-example'))
-    onnx.save(model_def, 'softmax_test.onnx')

+@onnx_test
def sqrt_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 15])

@@ -1808,16 +1674,14 @@ def sqrt_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_sqrt',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='sqrt-example')
-    onnx.save(model_def, 'sqrt_test.onnx')

+@onnx_test
def squeeze_unsqueeze_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 1, 1, 2, 1])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 2])

@@ -1837,16 +1701,14 @@ def squeeze_unsqueeze_test():
        outputs=['2']
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node,node2],
        'test-squeeze-unsqueeze',
        [x],
        [z]
    )
-    model_def = helper.make_model(graph_def, producer_name=('squeeze-unsqueeze-example'))
-    onnx.save(model_def, 'squeeze_unsqueeze_test.onnx')

+@onnx_test
def sub_bcast_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])

@@ -1861,16 +1723,14 @@ def sub_bcast_test():
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'subtraction2',
        [arg0, arg1],
        [arg_out],
    )
-    model_def = helper.make_model(graph_def, producer_name='subtraction2')
-    onnx.save(model_def, 'sub_bcast_test.onnx')

+@onnx_test
def sub_scalar_test():
    values = np.array([1])
    arg_node = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])

@@ -1897,16 +1757,14 @@ def sub_scalar_test():
        outputs=['out'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [arg_const, node],
        'subtraction1',
        [arg_node],
        [arg_out],
    )
-    model_def = helper.make_model(graph_def, producer_name='subtraction1')
-    onnx.save(model_def, 'sub_scalar_test.onnx')

+@onnx_test
def sum_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])

@@ -1920,16 +1778,14 @@ def sum_test():
        outputs=['3'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-sum',
        [a, b, c],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='sum-example')
-    onnx.save(model_def, 'sum_test.onnx')

+@onnx_test
def sum_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])

@@ -1942,16 +1798,14 @@ def sum_test():
        outputs=['3'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-sum',
        [a, b, c],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='sum-example')
-    onnx.save(model_def, 'sum_test.onnx')

+@onnx_test
def tan_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])

@@ -1962,16 +1816,14 @@ def tan_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_tan',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='tan-example')
-    onnx.save(model_def, 'tan_test.onnx')

+@onnx_test
def tanh_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])

@@ -1982,16 +1834,14 @@ def tanh_test():
        outputs=['y'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test_tanh',
        [x],
        [y],
    )
-    model_def = helper.make_model(graph_def, producer_name='tanh-example')
-    onnx.save(model_def, 'tahn_test.onnx')

+@onnx_test
def transpose_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 2, 3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 2, 2])

@@ -2003,16 +1853,49 @@ def transpose_test():
        outputs=['1'],
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node],
        'test-transpose',
        [x],
        [y]
    )
-    model_def = helper.make_model(graph_def, producer_name='transpose-example')
-    onnx.save(model_def, 'transpose_test.onnx')

+@onnx_test
+def transpose_gather_test():
+    x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
+    i = helper.make_tensor_value_info('indices', TensorProto.INT32, [2, 4, 3, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 2, 3, 4, 5, 4, 5, 6])
+
+    td = onnx.helper.make_node(
+        'Transpose',
+        inputs=['data'],
+        outputs=['tdata'],
+        perm=[0, 2, 1, 3],
+    )
+
+    ti = onnx.helper.make_node(
+        'Transpose',
+        inputs=['indices'],
+        outputs=['tindices'],
+        perm=[0, 2, 1, 3]
+    )
+
+    node = onnx.helper.make_node(
+        'Gather',
+        inputs=['tdata', 'tindices'],
+        outputs=['y'],
+        axis=1,
+    )
+
+    return helper.make_graph(
+        [td, ti, node],
+        'test_gather',
+        [x, i],
+        [y],
+    )

+@onnx_test
def unknown_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])

@@ -2031,7 +1914,7 @@ def unknown_test():
        outputs=['3']
    )

-    graph_def = helper.make_graph(
+    return helper.make_graph(
        [node,node2],
        'test-unknown',
        [x,y],

...
@@ -4,6 +4,7 @@
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
+#include <migraphx/instruction_ref.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp"

@@ -1015,6 +1016,32 @@ TEST_CASE(transpose_test)
    EXPECT(p == prog);
}

+TEST_CASE(transpose_gather_test)
+{
+    migraphx::program p;
+    auto make_contiguous = [&p](migraphx::instruction_ref ins) {
+        if(ins->get_shape().standard())
+        {
+            return ins;
+        }
+
+        return p.add_instruction(migraphx::op::contiguous{}, ins);
+    };
+
+    auto data = p.add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 5, 4, 6}});
+    auto ind =
+        p.add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 4, 3, 5}});
+    auto tr_data = p.add_instruction(migraphx::op::transpose{{0, 2, 1, 3}}, data);
+    auto tr_ind  = p.add_instruction(migraphx::op::transpose{{0, 2, 1, 3}}, ind);
+    int axis = 1;
+    p.add_instruction(
+        migraphx::op::gather{axis}, make_contiguous(tr_data), make_contiguous(tr_ind));
+
+    auto prog = migraphx::parse_onnx("transpose_gather_test.onnx");
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(unknown_test)
{
    migraphx::program p;

...
import numpy as np
import tensorflow as tf

+def tf_test(op_test):
+    def run_test():
+        g1 = tf.Graph()
+        op_test(g1)
+        tf.io.write_graph(g1, '.', '{}.pb'.format(op_test.__name__), as_text=False)
+    return run_test
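Note: each test now only builds its graph; the decorator owns graph creation and serialization, and calling a decorated test writes <name>.pb to the working directory. A minimal usage sketch (example_test is hypothetical, purely for illustration):

@tf_test
def example_test(g1):
    with g1.as_default():
        x = tf.placeholder(tf.float32, shape=(2, 2), name='0')
        tf.identity(x, 'identity')

example_test()  # writes example_test.pb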
-def add_test(g1=tf.Graph()):
+@tf_test
+def add_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '1')
        tf.add(g1_input, g2_input, name = 'add1')
-    tf.train.write_graph(g1, '.', 'add_test.pb', as_text=False)

-def add_bcast_test(g1=tf.Graph()):
+@tf_test
+def add_bcast_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(2,1), name = '1')
        tf.math.add(g1_input, g2_input, name = 'add_bcast1')
-    tf.train.write_graph(g1, '.', 'add_bcast_test.pb', as_text=False)

-def assert_less_equal_test(g1=tf.Graph()):
+@tf_test
+def assert_less_equal_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(2,3), name = '1')
        with tf.control_dependencies([tf.assert_less_equal(g1_input, g2_input)]):
            tf.add(g1_input, g2_input, name = 'add1')
-    tf.train.write_graph(g1, '.', 'assert_less_equal_test.pb', as_text=False)

-def batchmatmul_test(g1=tf.Graph()):
+@tf_test
+def batchmatmul_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,2,8,4), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,2,4,8), name = '1')
        tf.matmul(g1_input, g2_input, transpose_a=True, transpose_b=True, name='batchmatmul1')
-    tf.train.write_graph(g1, '.', 'batchmatmul_test.pb', as_text=False)

-def batchnorm_test(g1=tf.Graph()):
+@tf_test
+def batchnorm_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 32), name = '0')
        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name = '1')

@@ -43,271 +46,241 @@ def batchnorm_test(g1=tf.Graph()):
        g1_mean = tf.placeholder(tf.float32, shape=(32), name = '3')
        g1_variance = tf.placeholder(tf.float32, shape=(32), name = '4')
        tf.nn.fused_batch_norm(
            g1_input, g1_scale, g1_offset, g1_mean, g1_variance,
-            epsilon=0.00001, is_training=False, name='batchnorm1')
+            epsilon=0.00001, is_training=False, name='batchnorm1'
+        )
-    tf.train.write_graph(g1, '.', 'batchnorm_test.pb', as_text=False)

-def biasadd_test(g1=tf.Graph()):
+@tf_test
+def biasadd_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,1,1,500), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(500), name = '1')
        tf.nn.bias_add(g1_input, g2_input, name = 'bias_add1')
-    tf.train.write_graph(g1, '.', 'biasadd_test.pb', as_text=False)

-def cast_test(g1=tf.Graph()):
+@tf_test
+def cast_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.cast(g1_input, dtype=tf.int32, name='cast1')
-    tf.train.write_graph(g1, '.', 'cast_test.pb', as_text=False)

-def concat_test(g1=tf.Graph()):
+@tf_test
+def concat_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(4,7,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(4,2,3), name = '1')
        tf.concat([g1_input, g2_input], axis=1, name = 'concat1')
-    tf.train.write_graph(g1, '.', 'concat_test.pb', as_text=False)

-def const_test(g1=tf.Graph()):
+@tf_test
+def const_test(g1):
    with g1.as_default():
        tf.constant(1.0, dtype=tf.float32 ,name='constant1')
-    tf.train.write_graph(g1, '.', 'constant_test.pb', as_text=False)

-def conv_test(g1=tf.Graph()):
+@tf_test
+def conv_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,16,16,3), name = '0')
        g1_weights = tf.constant(value=1.0, dtype=tf.float32, shape=(3,3,3,32), name = '1')
        tf.nn.conv2d(g1_input, g1_weights, [1,1,1,1], "SAME", name = 'conv1')
-    tf.train.write_graph(g1, '.', 'conv_test.pb', as_text=False)

-def depthwiseconv_test(g1=tf.Graph()):
+@tf_test
+def depthwiseconv_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,16,16,3), name = '0')
        g1_weights = tf.constant(value=1.0, dtype=tf.float32, shape=(3,3,3,1), name = '1')
        tf.nn.depthwise_conv2d_native(g1_input, g1_weights, [1,1,1,1], "SAME", name = 'depthwiseconv1')
-    tf.train.write_graph(g1, '.', 'depthwise_conv_test.pb', as_text=False)

-def expanddims_test(g1=tf.Graph()):
+@tf_test
+def expanddims_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2,3,4), name = '0')
        tf.expand_dims(g1_input, axis=-1, name='expanddims_neg')
-    tf.train.write_graph(g1, '.', 'expanddims_neg_test.pb', as_text=False)

-def gather_test(g1=tf.Graph()):
+@tf_test
+def gather_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2,4), name = '0')
        tf.gather(g1_input, [1,1], axis=1, name='gather1')
-    tf.train.write_graph(g1, '.', 'gather_test.pb', as_text=False)

-def identity_test(g1=tf.Graph()):
+@tf_test
+def identity_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.identity(g1_input, 'identity')
-    tf.train.write_graph(g1, '.', 'identity_test.pb', as_text=False)

-def matmul_test(g1=tf.Graph()):
+@tf_test
+def matmul_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(8,4), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(4,8), name = '1')
        tf.matmul(g1_input, g2_input, transpose_a=True, transpose_b=True, name='matmul1')
-    tf.train.write_graph(g1, '.', 'matmul_test.pb', as_text=False)
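Note: matmul_test multiplies a (8,4) by a (4,8) input with both operands transposed, so the result is 4x4, not 8x8. In numpy terms (illustration only):

import numpy as np

a = np.ones((8, 4), dtype=np.float32)
b = np.ones((4, 8), dtype=np.float32)
print(np.matmul(a.T, b.T).shape)  # (4, 4)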
-def mean_test(g1=tf.Graph()):
+@tf_test
+def mean_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.math.reduce_mean(
            g1_input,
            axis=(2,3),
            keepdims=True,
            name='mean1'
        )
        tf.math.reduce_mean(
            g1_input,
            axis=(2,3),
            keepdims=False,
            name='mean2'
        )
-    tf.train.write_graph(g1, '.', 'mean_test.pb', as_text=False)

-def mean_test_nhwc(g1=tf.Graph()):
+@tf_test
+def mean_test_nhwc(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,16,16,3), name = '0')
        tf.math.reduce_mean(
            g1_input,
            axis=(1,2),
            keepdims=True,
            name='mean1'
        )
        tf.math.reduce_mean(
            g1_input,
            axis=(1,2),
            keepdims=False,
            name='mean2'
        )
-    tf.train.write_graph(g1, '.', 'mean_test_nhwc.pb', as_text=False)

-def mul_test(g1=tf.Graph()):
+@tf_test
+def mul_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,1,1,16), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,1,1,16), name = '1')
        tf.multiply(g1_input, g2_input, name='mul1')
-    tf.train.write_graph(g1, '.', 'mul_test.pb', as_text=False)

-def pack_test(g1=tf.Graph()):
+@tf_test
+def pack_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(2), name = '1')
        g3_input = tf.placeholder(tf.float32, shape=(2), name = '2')
        tf.stack([g1_input, g2_input, g3_input], axis=1, name = 'pack1')
-    tf.train.write_graph(g1, '.', 'pack_test.pb', as_text=False)

-def pack_test_nhwc(g1=tf.Graph()):
+@tf_test
+def pack_test_nhwc(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,1,1,2), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,1,1,2), name = '1')
        g3_input = tf.placeholder(tf.float32, shape=(1,1,1,2), name = '2')
        tf.stack([g1_input, g2_input, g3_input], axis=3, name = 'pack1')
-    tf.train.write_graph(g1, '.', 'pack_test_nhwc.pb', as_text=False)

-def pooling_test(g1=tf.Graph()):
+@tf_test
+def pooling_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,16,16,3), name = '0')
        tf.nn.avg_pool(
            value=g1_input,
            ksize=(1,2,2,1),
            strides=(1,2,2,1),
            padding='VALID',
            data_format='NHWC',
            name='avg_pooling'
        )
        tf.nn.max_pool(
            value=g1_input,
            ksize=(1,2,2,1),
            strides=(1,2,2,1),
            padding='VALID',
            data_format='NHWC',
            name='max_pooling'
        )
-    tf.train.write_graph(g1, '.', 'pooling_test.pb', as_text=False)

-def pow_test(g1=tf.Graph()):
+@tf_test
+def pow_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '1')
        tf.pow(g1_input, g2_input, name = 'pow1')
-    tf.train.write_graph(g1, '.', 'pow_test.pb', as_text=False)

-def relu_test(g1=tf.Graph()):
+@tf_test
+def relu_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.nn.relu(g1_input, 'relu')
-    tf.train.write_graph(g1, '.', 'relu_test.pb', as_text=False)

-def relu6_test(g1=tf.Graph()):
+@tf_test
+def relu6_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.nn.relu6(g1_input, 'relu6')
-    tf.train.write_graph(g1, '.', 'relu6_test.pb', as_text=False)

-def reshape_test(g1=tf.Graph()):
+@tf_test
+def reshape_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(16), name = '0')
        tf.reshape(g1_input, (1,1,1,16), 'reshape')
-    tf.train.write_graph(g1, '.', 'reshape_test.pb', as_text=False)

-def rsqrt_test(g1=tf.Graph()):
+@tf_test
+def rsqrt_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.math.rsqrt(g1_input, 'rsqrt')
-    tf.train.write_graph(g1, '.', 'rsqrt_test.pb', as_text=False)

-def slice_test(g1=tf.Graph()):
+@tf_test
+def slice_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(5,10), name = '0')
        tf.slice(g1_input, [1, 0], [2, -1], name = 'slice1')
-    tf.train.write_graph(g1, '.', 'slice_test.pb', as_text=False)

-def softmax_test(g1=tf.Graph()):
+@tf_test
+def softmax_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3), name = '0')
        tf.nn.softmax(g1_input, name='softmax')
-    tf.train.write_graph(g1, '.', 'softmax_test.pb', as_text=False)

-def sqdiff_test(g1=tf.Graph()):
+@tf_test
+def sqdiff_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '1')
        tf.squared_difference(g1_input, g2_input, name = 'sqdiff')
-    tf.train.write_graph(g1, '.', 'sqdiff_test.pb', as_text=False)

-def squeeze_test(g1=tf.Graph()):
+@tf_test
+def squeeze_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,2,3,1), name = '0')
        tf.squeeze(g1_input, name='squeeze')
-    tf.train.write_graph(g1, '.', 'squeeze_test.pb', as_text=False)

-def stopgradient_test(g1=tf.Graph()):
+@tf_test
+def stopgradient_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.stop_gradient(g1_input, 'stopgradient')
-    tf.train.write_graph(g1, '.', 'stopgradient_test.pb', as_text=False)

-def stridedslice_test(g1=tf.Graph()):
+@tf_test
+def stridedslice_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,1,1,10), name = '0')
        tf.strided_slice(g1_input, [0, 0, 0, 0], [1, 1, 1, 5], [1,1,1,1], shrink_axis_mask=2, name = 'stridedslice1')
-    tf.train.write_graph(g1, '.', 'stridedslice_test.pb', as_text=False)

-def stridedslice_masks_test(g1=tf.Graph()):
+@tf_test
+def stridedslice_masks_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,3,10), name = '0')
        tf.strided_slice(g1_input, [0, 1, 1, 0], [0, 0, 0, 0], [1,1,1,1], begin_mask=9, end_mask=15, name = 'stridedslice1')
-    tf.train.write_graph(g1, '.', 'stridedslice_masks_test.pb', as_text=False)
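Note: in stridedslice_masks_test, bit i of begin_mask/end_mask tells TF to ignore begin[i]/end[i] and use the full extent of axis i instead. Decoding the masks used above (illustration only):

begin_mask, end_mask = 9, 15
print([bool(begin_mask >> i & 1) for i in range(4)])  # [True, False, False, True]: begin ignored on axes 0 and 3
print([bool(end_mask >> i & 1) for i in range(4)])    # [True, True, True, True]: every end runs to the axis size

With a (1,3,3,10) input this selects [0:1, 1:3, 1:3, 0:10], which is the slice the C++ test below expects.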
-def sub_test(g1=tf.Graph()):
+@tf_test
+def sub_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '0')
        g2_input = tf.placeholder(tf.float32, shape=(1,2,2,3), name = '1')
        tf.subtract(g1_input, g2_input, name = 'sub1')
-    tf.train.write_graph(g1, '.', 'sub_test.pb', as_text=False)

-def tanh_test(g1=tf.Graph()):
+@tf_test
+def tanh_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.tanh(g1_input, 'tanh')
-    tf.train.write_graph(g1, '.', 'tanh_test.pb', as_text=False)

-def transpose_test(g1=tf.Graph()):
+@tf_test
+def transpose_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(1,3,16,16), name = '0')
        tf.transpose(g1_input, perm=[0,2,3,1], name = 'transpose')
-    tf.train.write_graph(g1, '.', 'transpose_test.pb', as_text=False)
@@ -48,6 +48,22 @@ TEST_CASE(add_bcast_test)
    EXPECT(p == prog);
}

+TEST_CASE(assert_less_equal_test)
+{
+    migraphx::program p;
+    migraphx::shape s0{migraphx::shape::float_type, {2, 3}};
+    auto l0 = p.add_parameter("0", s0);
+    auto l1 = p.add_parameter("1", s0);
+    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {0, 1}};
+    auto l2 = p.add_literal(l);
+    p.add_instruction(migraphx::op::add{}, l0, l1);
+    auto l3 = p.add_instruction(migraphx::op::identity{}, l0, l1);
+    p.add_instruction(migraphx::op::identity{}, l3, l2);
+    auto prog = optimize_tf("assert_less_equal_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(batchmatmul_test)
{
    migraphx::program p;

@@ -100,6 +116,16 @@ TEST_CASE(biasadd_test)
    EXPECT(p == prog);
}

+TEST_CASE(cast_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
+    p.add_instruction(migraphx::op::convert{migraphx::shape::int32_type}, l0);
+    auto prog = optimize_tf("cast_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(concat_test)
{
    migraphx::program p;

@@ -118,16 +144,6 @@ TEST_CASE(concat_test)
    EXPECT(p == prog);
}

-TEST_CASE(cast_test)
-{
-    migraphx::program p;
-    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
-    p.add_instruction(migraphx::op::convert{migraphx::shape::int32_type}, l0);
-    auto prog = optimize_tf("cast_test.pb", false);
-
-    EXPECT(p == prog);
-}
-
TEST_CASE(const_test)
{
    migraphx::program p;

@@ -271,9 +287,10 @@ TEST_CASE(mean_test_nhwc)
    migraphx::program p;
    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 2}};
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
-    migraphx::op::reduce_mean op{{2, 3}};
-    auto l3 = p.add_instruction(op, l0);
-    p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
+    auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
+    migraphx::op::reduce_mean op{{1, 2}};
+    auto l2 = p.add_instruction(op, l1);
+    p.add_instruction(migraphx::op::squeeze{{1, 2}}, l2);
    auto prog = optimize_tf("mean_test_nhwc.pb", true);

    EXPECT(p == prog);

@@ -291,6 +308,23 @@ TEST_CASE(mul_test)
    EXPECT(p == prog);
}
+TEST_CASE(onehot_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_literal(
+        migraphx::literal{migraphx::shape{migraphx::shape::int32_type, {5}}, {1, 1, 1, 1, 1}});
+    p.add_literal(2);
+    p.add_literal(1.0f);
+    p.add_literal(0.0f);
+    auto l1 = p.add_literal(
+        migraphx::literal{migraphx::shape{migraphx::shape::float_type, {2, 2}}, {1, 0, 0, 1}});
+    int axis = 0;
+    p.add_instruction(migraphx::op::gather{axis}, l1, l0);
+    auto prog = optimize_tf("onehot_test.pb", false);
+
+    EXPECT(p == prog);
+}
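Note: the lowering checked above implements one-hot as a row gather from a depth-by-depth identity matrix (the {1, 0, 0, 1} literal). The numpy equivalent, for illustration only:

import numpy as np

eye = np.eye(2, dtype=np.float32)    # the 2x2 identity literal
indices = np.array([1, 1, 1, 1, 1])  # the int32 indices literal
print(eye[indices])                  # five one-hot rows, shape (5, 2)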

TEST_CASE(pack_test)
{
    migraphx::program p;

@@ -475,20 +509,44 @@ TEST_CASE(stridedslice_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 1, 1}});
+    auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
    std::size_t num_axes = 4;
    migraphx::op::slice op;
    op.starts = {0, 0, 0, 0};
    op.ends = {1, 1, 1, 5};
    op.axes = std::vector<int64_t>(num_axes);
    std::iota(op.axes.begin(), op.axes.end(), 0);
-    auto l1 = p.add_instruction(op, l0);
+    auto l2 = p.add_instruction(op, l1);
    auto shrink_axis = 1;
-    p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l1);
+    p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l2);
    auto prog = optimize_tf("stridedslice_test.pb", true);

    EXPECT(p == prog);
}

+TEST_CASE(stridedslice_masks_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 3, 3}});
+    std::size_t num_axes = 4;
+    migraphx::op::slice op;
+    op.starts = {0, 1, 1, 0};
+    op.ends = {1, 3, 3, 10};
+    op.axes = std::vector<int64_t>(num_axes);
+    std::iota(op.axes.begin(), op.axes.end(), 0);
+
+    // add literals for starts, ends, and strides in tf (NHWC format)
+    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 1, 1, 0});
+    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 0, 0, 0});
+    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{1, 1, 1, 1});
+
+    auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
+    auto l2 = p.add_instruction(op, l1);
+    p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l2);
+    auto prog = migraphx::parse_tf("stridedslice_masks_test.pb", true);
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(sub_test)
{
    migraphx::program p;

...