Commit f94d77fc authored by Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mi100_opts

parents 03929873 6403d482
#ifndef MIGRAPHX_GUARD_RTGLIB_SCATTER_HPP
#define MIGRAPHX_GUARD_RTGLIB_SCATTER_HPP

#include <migraphx/argument.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/op/scatter.hpp>
#include <migraphx/gpu/miopen.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct context;

struct hip_scatter
{
    op::scatter op;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }

    std::string name() const { return "gpu::scatter"; }
    shape compute_shape(std::vector<shape> inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;

    // The last input is the preallocated output buffer, so the result
    // aliases it rather than requiring a fresh allocation.
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -163,6 +163,7 @@ struct miopen_apply
add_extend_op("lrn");
add_extend_op("pad");
add_extend_op("pooling");
add_extend_op("prefix_scan_sum");
add_extend_op("reduce_max");
add_extend_op("reduce_mean");
add_extend_op("reduce_min");
......@@ -172,6 +173,7 @@ struct miopen_apply
add_extend_op("rnn_var_sl_last_output");
add_extend_op("rnn_var_sl_shift_output");
add_extend_op("rnn_var_sl_shift_sequence");
add_extend_op("scatter");
add_extend_op("softmax");
add_gemm_op<op::dot>("dot");
......
#include <migraphx/gpu/scatter.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device/scatter.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_scatter::compute_shape(std::vector<shape> inputs) const
{
    // Drop the trailing output-allocation shape before validating the
    // logical inputs (data, indices, updates).
    inputs.pop_back();
    return op.normalize_compute_shape(inputs);
}

argument hip_scatter::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    // args = {data, indices, updates, output}; write into the preallocated
    // output buffer on the current stream.
    return device::scatter(ctx.get_stream().get(), args.back(), args[0], args[1], args[2], op.axis);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
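For intuition, the new op follows ONNX Scatter semantics: the output starts as a copy of data, and each element of updates overwrites the position selected by indices along op.axis. Below is a minimal CPU sketch of the 1-D, axis-0 case (hypothetical helper name; the real device::scatter generalizes this to N-D tensors on a HIP stream):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hedged sketch: copy data, then overwrite the indexed positions.
std::vector<float> scatter_axis0(std::vector<float> data,
                                 const std::vector<std::int64_t>& indices,
                                 const std::vector<float>& updates)
{
    for(std::size_t i = 0; i < indices.size(); ++i)
        data[static_cast<std::size_t>(indices[i])] = updates[i];
    return data;
}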
......@@ -14,11 +14,13 @@
#include <migraphx/insert_pad.hpp>
#include <migraphx/memory_coloring.hpp>
#include <migraphx/normalize_ops.hpp>
#include <migraphx/preallocate_param.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/remap.hpp>
#include <migraphx/rewrite_batchnorm.hpp>
#include <migraphx/rewrite_pooling.hpp>
#include <migraphx/rewrite_quantization.hpp>
#include <migraphx/rewrite_rnn.hpp>
#include <migraphx/schedule.hpp>
#include <migraphx/simplify_algebra.hpp>
......@@ -31,7 +33,6 @@
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/preallocate_param.hpp>
#include <migraphx/gpu/schedule_model.hpp>
#include <migraphx/gpu/sync_device.hpp>
#include <migraphx/gpu/target.hpp>
......@@ -59,6 +60,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
normalize_ops{},
decompose{},
dead_code_elimination{},
rewrite_quantization{},
dead_code_elimination{},
eliminate_data_type{unsupported_types, shape::type_t::float_type},
simplify_reshapes{},
eliminate_identity{},
......@@ -98,7 +101,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
schedule{gpu::schedule_model{ctx.get_current_device().nstreams()}, not enabled(MIGRAPHX_DISABLE_SCHEDULE_PASS{})},
memory_coloring{"hip::allocate"},
sync_device{},
preallocate_param{"scratch", &ctx},
preallocate_param{"scratch", gpu_allocation_model{}},
dead_code_elimination{},
eliminate_workspace{},
eliminate_allocation{"hip::allocate"},
......
......@@ -17,6 +17,7 @@
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/common.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/tf/tf_parser.hpp>
......@@ -74,66 +75,11 @@ instruction_ref tf_parser::node_info::make_contiguous(instruction_ref ins) const
return mm->add_instruction(make_op("contiguous"), ins);
}
std::vector<std::size_t> compute_broadcasted_lens(std::vector<std::size_t> s0,
std::vector<std::size_t> s1)
{
// Example:
// s0 = (3,2,4,5) and s1 = (2,1,1)
//
// In this case we need to broadcast (:,1,1) portion of
// s1 plus broadcast the 1st dimension of s1
// giving output_lens = (3,2,4,5)
//
// Another example:
// s0 = (3,2,1,5) and s1 = (2,7,5)
// In this case we need to broadcast the (:,:,1:,:) axis
// of s0 plus the 1st dimension of s1 giving
// output_lens = (3,2,7,5)
if(s0.size() > s1.size())
{
s0.swap(s1);
}
std::vector<std::size_t> out_lens(s1);
auto offset = s1.size() - s0.size();
std::transform(
s0.begin(), s0.end(), s1.begin() + offset, out_lens.begin() + offset, [&](auto a, auto b) {
if(a != b and a != 1 and b != 1)
{
MIGRAPHX_THROW("COMPUTE_BROADCASTLEN: shape {" + to_string_range(s0) + "} and {" +
to_string_range(s1) + "} mismatch!");
}
return std::max(a, b);
});
return out_lens;
}
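The two worked examples in the comment can be checked directly. A small sketch, assuming compute_broadcasted_lens as defined above is in scope:

#include <cassert>
#include <cstddef>
#include <vector>

void broadcast_len_examples()
{
    using lens = std::vector<std::size_t>;
    // s0 = (3,2,4,5), s1 = (2,1,1)  -> (3,2,4,5)
    assert((compute_broadcasted_lens(lens{3, 2, 4, 5}, lens{2, 1, 1}) == lens{3, 2, 4, 5}));
    // s0 = (3,2,1,5), s1 = (2,7,5)  -> (3,2,7,5)
    assert((compute_broadcasted_lens(lens{3, 2, 1, 5}, lens{2, 7, 5}) == lens{3, 2, 7, 5}));
}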
instruction_ref tf_parser::node_info::add_broadcastable_binary_op(const std::string& op_name,
instruction_ref arg0,
instruction_ref arg1) const
{
if(arg0->get_shape().lens() != arg1->get_shape().lens())
{
// Get lengths for both arguments
auto s0 = arg0->get_shape().lens();
auto s1 = arg1->get_shape().lens();
auto out_lens = compute_broadcasted_lens(s0, s1);
auto l0 = arg0;
if(arg0->get_shape().lens() != out_lens)
l0 = add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}), arg0);
auto l1 = arg1;
if(arg1->get_shape().lens() != out_lens)
l1 = add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}), arg1);
return add_instruction(make_op(op_name), l0, l1);
}
else
{
return add_instruction(make_op(op_name), {arg0, arg1});
}
return add_common_op(*mm, make_op(op_name), {arg0, arg1});
}
int64_t tf_parser::parse_axis(const int64_t dim, const size_t num_dims) const
......
......@@ -131,4 +131,32 @@ TEST_CASE(non_standard_return_input)
EXPECT(std::distance(m.begin(), m.end()) == count);
}
TEST_CASE(non_standard_flatten_op)
{
migraphx::module m;
auto l = m.add_parameter("x", {migraphx::shape::float_type, {2, 6, 6, 6}});
auto t = m.add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), l);
auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
m.add_instruction(migraphx::make_op("flatten"), c);
auto count = std::distance(m.begin(), m.end());
run_pass(m);
EXPECT(std::distance(m.begin(), m.end()) == count);
}
TEST_CASE(standard_flatten_op)
{
migraphx::module m;
auto l = m.add_parameter("x", {migraphx::shape::float_type, {2, 6, 6, 6}});
auto t = m.add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), l);
auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
m.add_instruction(migraphx::make_op("flatten"), c);
auto count = std::distance(m.begin(), m.end());
run_pass(m);
EXPECT(std::distance(m.begin(), m.end()) == (count - 1));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
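The contrast between these two test cases appears to come down to strides: slicing the trailing axes of a standard tensor leaves gaps between rows, while trimming only the leading axes keeps the elements densely packed. A hedged sketch of the stride arithmetic (plain numbers only, no MIGraphX calls):

#include <array>
#include <cstddef>

int main()
{
    // Row-major strides for shape {2, 6, 6, 6}.
    constexpr std::array<std::size_t, 4> strides{216, 36, 6, 1};

    // slice axes {2,3}: lens become {2,6,5,5} but strides stay {216,36,6,1};
    // a packed {2,6,5,5} tensor would need {150,25,5,1}, so the result is
    // non-standard and the contiguous copy before flatten must be kept.

    // slice axes {0,1}: lens become {1,5,6,6} and every element is still
    // densely packed, so the shape stays standard and the pass can drop the
    // contiguous (hence the expected count - 1).
    (void)strides;
    return 0;
}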
......@@ -6,6 +6,7 @@
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/compile_hip_code_object.hpp>
......@@ -74,19 +75,10 @@ migraphx::src_file make_src_file(const std::string& name, const std::string& con
return {name, std::make_pair(content.data(), content.data() + content.size())};
}
std::string get_device_name()
{
hipDeviceProp_t props{};
int device;
EXPECT(hipGetDevice(&device) == hipSuccess);
EXPECT(hipGetDeviceProperties(&props, device) == hipSuccess);
return "gfx" + std::to_string(props.gcnArch);
}
TEST_CASE(simple_compile_hip)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("main.cpp", write_2s)}, "", get_device_name());
{make_src_file("main.cpp", write_2s)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::argument input{{migraphx::shape::int8_type, {5}}};
......@@ -103,7 +95,7 @@ TEST_CASE(simple_compile_hip)
TEST_CASE(code_object_hip)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("main.cpp", add_2s_binary)}, "", get_device_name());
{make_src_file("main.cpp", add_2s_binary)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::shape input{migraphx::shape::int8_type, {5}};
......
......@@ -6,6 +6,7 @@
#include <functional>
#include <iostream>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <vector>
......@@ -83,8 +84,8 @@ struct function
}
};
template <class Iterator>
inline std::ostream& stream_range(std::ostream& s, Iterator start, Iterator last)
template <class Stream, class Iterator>
inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
if(start != last)
{
......@@ -94,22 +95,17 @@ inline std::ostream& stream_range(std::ostream& s, Iterator start, Iterator last
return s;
}
inline std::ostream& operator<<(std::ostream& s, std::nullptr_t)
template <class Stream>
inline Stream& operator<<(Stream& s, std::nullptr_t)
{
s << "nullptr";
return s;
}
template <class T>
inline std::ostream& operator<<(std::ostream& s, const std::vector<T>& v)
{
s << "{ ";
stream_range(s, v.begin(), v.end());
s << "}";
return s;
}
inline std::ostream& operator<<(std::ostream& s, const std::vector<bool>& v)
template <class Stream,
class Range,
class = typename std::enable_if<not std::is_convertible<Range, std::string>{}>::type>
inline auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.begin(), v.end()))
{
s << "{ ";
stream_range(s, v.begin(), v.end());
......
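With the generic Range overload replacing the per-type std::vector printers, any container exposing begin()/end() (string-convertible types are excluded by the enable_if) can be streamed, and Stream is no longer pinned to std::ostream. A usage sketch, assuming the operators above are in scope:

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main()
{
    std::vector<int> v{1, 2, 3};
    std::set<std::string> s{"a", "b"};
    std::cout << v << "\n"; // handled by the generic Range operator<<
    std::cout << s << "\n"; // any begin()/end() container now works
    return 0;
}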
(Binary protobuf diffs: the .onnx test files for dequantizelinear_test and dequantizelinear_zero_point_test were updated; contents not displayed.)
......@@ -4,8 +4,7 @@
import numpy as np
import onnx
from onnx import helper
from onnx import numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
from onnx import TensorProto
def onnx_test(op_test):
......@@ -483,7 +482,6 @@ def constant_fill_test():
@onnx_test
def constant_fill_input_as_shape_test():
np_shape = np.array([2, 3])
shape = helper.make_tensor_value_info('shape', TensorProto.INT32, [2])
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
ts_shape = helper.make_tensor(name='shape_tensor',
......@@ -534,7 +532,6 @@ def constant_scalar_test():
def const_of_shape_empty_input_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
data_type=TensorProto.INT32,
......@@ -1021,6 +1018,21 @@ def deconv_stride_test():
@onnx_test
def dequantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT, [5])
node = onnx.helper.make_node(
'DequantizeLinear',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def dequantizelinear_zero_point_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg2 = helper.make_tensor_value_info('2', TensorProto.INT8, [1])
......@@ -1217,98 +1229,6 @@ def equal_bool_test():
return ([node1, node2], [x1, x2], [y])
@onnx_test
def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Greater',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def greater_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Greater',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def less_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Less',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def less_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Less',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def lessorequal_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'LessOrEqual',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x1, x2], [y])
@onnx_test
def erf_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
......@@ -1376,6 +1296,29 @@ def flatten_test():
return ([node, node2], [x], [y, y2])
@onnx_test
def flatten_nonstd_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 5, 4])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60])
trans = helper.make_node(
'Transpose',
inputs=['0'],
outputs=['tx'],
perm=[0, 1, 3, 2],
)
node = onnx.helper.make_node('Flatten',
inputs=['tx'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['tx'], outputs=['3'])
return ([trans, node, node2], [x], [y, y2])
@onnx_test
def floor_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
......@@ -1491,6 +1434,23 @@ def gemm_ex_brcst_test():
return ([node], [m1, m2, m3], [y])
@onnx_test
def gemm_half_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 1, 8, 6])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [1, 1, 8, 7])
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 1, 6, 7])
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
@onnx_test
def globalavgpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
......@@ -1519,6 +1479,44 @@ def globalmaxpool_test():
return ([node], [x], [y])
@onnx_test
def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Greater',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def greater_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Greater',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def group_conv_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
......@@ -1595,8 +1593,6 @@ def if_literal_test():
onnx.TensorProto.FLOAT, [5])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT, [5])
empty_out = onnx.helper.make_tensor_value_info('empty_out',
onnx.TensorProto.FLOAT, [])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
......@@ -2216,6 +2212,60 @@ def leaky_relu_test():
return ([node], [x], [y])
@onnx_test
def less_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Less',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def less_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Less',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def lessorequal_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'LessOrEqual',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x1, x2], [y])
@onnx_test
def log_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
......@@ -2279,8 +2329,7 @@ def logsoftmax_test():
@onnx_test
def logsoftmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 9])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node('Slice',
inputs=['0'],
......@@ -2294,7 +2343,7 @@ def logsoftmax_nonstd_input_test():
outputs=['2'],
axis=-1)
return ([node0, node1], [x], [z])
return ([node0, node1], [x], [y])
@onnx_test
......@@ -2751,6 +2800,36 @@ def prelu_brcst_test():
@onnx_test
def quantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg_out = helper.make_tensor_value_info('out', TensorProto.INT8, [5])
node = onnx.helper.make_node(
'QuantizeLinear',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def quantizelinear_int32_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT32, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg_out = helper.make_tensor_value_info('out', TensorProto.INT8, [5])
node = onnx.helper.make_node(
'QuantizeLinear',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def quantizelinear_zero_point_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg2 = helper.make_tensor_value_info('2', TensorProto.INT8, [1])
......@@ -3145,8 +3224,6 @@ def reshape_test():
@onnx_test
def reshape_non_standard_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
trans_x = helper.make_tensor_value_info('trans_x', TensorProto.FLOAT,
[2, 4, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 3, 2])
trans = helper.make_node(
......@@ -3356,6 +3433,25 @@ def resize_upsample_pc_test():
return ([node], [X], [Y], [scale_tensor])
@onnx_test
def scatter_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
i = helper.make_tensor_value_info('indices', TensorProto.INT32,
[2, 3, 4, 5])
u = helper.make_tensor_value_info('update', TensorProto.FLOAT,
[2, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
node = onnx.helper.make_node(
'Scatter',
inputs=['data', 'indices', 'update'],
outputs=['y'],
axis=-2,
)
return ([node], [x, i, u], [y])
@onnx_test
def selu_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [2, 3])
......@@ -3389,7 +3485,6 @@ def shape_gather_test():
values = np.array([1])
# value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [7, 3, 10])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [3])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [1])
value_tensor = helper.make_tensor(name='const_tensor',
......@@ -3696,8 +3791,7 @@ def softmax_test():
@onnx_test
def softmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 8])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node('Slice',
inputs=['0'],
......@@ -3708,7 +3802,7 @@ def softmax_nonstd_input_test():
node1 = onnx.helper.make_node('Softmax', inputs=['1'], outputs=['2'])
return ([node0, node1], [x], [z])
return ([node0, node1], [x], [y])
@onnx_test
......@@ -3811,8 +3905,7 @@ def squeeze_empty_axes_test():
def squeeze_unsqueeze_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, 1, 1, 2, 1])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 2])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT,
y = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, 3, 1, 2, 1])
node = onnx.helper.make_node('Squeeze',
......@@ -3825,7 +3918,7 @@ def squeeze_unsqueeze_test():
axes=[0, 1, 3, 5],
outputs=['2'])
return ([node, node2], [x], [z])
return ([node, node2], [x], [y])
@onnx_test
......