Commit f94d77fc authored by Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mi100_opts

parents 03929873 6403d482
#ifndef MIGRAPHX_GUARD_RTGLIB_SCATTER_HPP
#define MIGRAPHX_GUARD_RTGLIB_SCATTER_HPP
#include <migraphx/argument.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/op/scatter.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct context;
struct hip_scatter
{
op::scatter op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::scatter"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
}
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
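For context: MIGraphX GPU ops receive their preallocated output buffer as the final argument (note args.back() in the compute implementation further down), so compute_shape pops it before validating and output_alias reports that the result aliases that buffer. A minimal sketch of the convention with toy stand-in types, not the real MIGraphX API:

#include <cstddef>
#include <iostream>
#include <vector>

// Toy stand-in illustrating the convention used by hip_scatter: the output
// buffer arrives as the last input, compute_shape drops it before shape
// checking, and output_alias marks the result as aliasing that buffer.
struct toy_gpu_op
{
    std::size_t compute_shape(std::vector<int> inputs) const
    {
        inputs.pop_back();    // drop the appended output buffer
        return inputs.size(); // the "real" arguments that remain
    }
    std::ptrdiff_t output_alias(const std::vector<int>& inputs) const
    {
        return inputs.size() - 1; // result reuses the last argument's memory
    }
};

int main()
{
    toy_gpu_op op;
    std::vector<int> args = {0, 1, 2, 3}; // data, indices, updates, output
    std::cout << op.compute_shape(args) << " " << op.output_alias(args) << "\n"; // prints: 3 3
}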
......@@ -163,6 +163,7 @@ struct miopen_apply
add_extend_op("lrn");
add_extend_op("pad");
add_extend_op("pooling");
add_extend_op("prefix_scan_sum");
add_extend_op("reduce_max");
add_extend_op("reduce_mean");
add_extend_op("reduce_min");
......@@ -172,6 +173,7 @@ struct miopen_apply
add_extend_op("rnn_var_sl_last_output");
add_extend_op("rnn_var_sl_shift_output");
add_extend_op("rnn_var_sl_shift_sequence");
add_extend_op("scatter");
add_extend_op("softmax");
add_gemm_op<op::dot>("dot");
......
#include <migraphx/gpu/scatter.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device/scatter.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape hip_scatter::compute_shape(std::vector<shape> inputs) const
{
inputs.pop_back();
return op.normalize_compute_shape(inputs);
}
argument hip_scatter::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
return device::scatter(ctx.get_stream().get(), args.back(), args[0], args[1], args[2], op.axis);
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -14,11 +14,13 @@
#include <migraphx/insert_pad.hpp>
#include <migraphx/memory_coloring.hpp>
#include <migraphx/normalize_ops.hpp>
#include <migraphx/preallocate_param.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/remap.hpp>
#include <migraphx/rewrite_batchnorm.hpp>
#include <migraphx/rewrite_pooling.hpp>
#include <migraphx/rewrite_quantization.hpp>
#include <migraphx/rewrite_rnn.hpp>
#include <migraphx/schedule.hpp>
#include <migraphx/simplify_algebra.hpp>
......@@ -31,7 +33,6 @@
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/preallocate_param.hpp>
#include <migraphx/gpu/schedule_model.hpp>
#include <migraphx/gpu/sync_device.hpp>
#include <migraphx/gpu/target.hpp>
......@@ -59,6 +60,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
normalize_ops{},
decompose{},
dead_code_elimination{},
rewrite_quantization{},
dead_code_elimination{},
eliminate_data_type{unsupported_types, shape::type_t::float_type},
simplify_reshapes{},
eliminate_identity{},
......@@ -98,7 +101,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
schedule{gpu::schedule_model{ctx.get_current_device().nstreams()}, not enabled(MIGRAPHX_DISABLE_SCHEDULE_PASS{})},
memory_coloring{"hip::allocate"},
sync_device{},
preallocate_param{"scratch", &ctx},
preallocate_param{"scratch", gpu_allocation_model{}},
dead_code_elimination{},
eliminate_workspace{},
eliminate_allocation{"hip::allocate"},
......
......@@ -17,6 +17,7 @@
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/common.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/tf/tf_parser.hpp>
......@@ -74,66 +75,11 @@ instruction_ref tf_parser::node_info::make_contiguous(instruction_ref ins) const
return mm->add_instruction(make_op("contiguous"), ins);
}
std::vector<std::size_t> compute_broadcasted_lens(std::vector<std::size_t> s0,
std::vector<std::size_t> s1)
{
// Example:
// s0 = (3,2,4,5) and s1 = (2,1,1)
//
// Here the (:,1,1) portion of s1 is broadcast and s1 gains a
// leading dimension, giving output_lens = (3,2,4,5)
//
// Another example:
// s0 = (3,2,1,5) and s1 = (2,7,5)
//
// Here the (:,:,1:,:) axis of s0 is broadcast and s1 gains a
// leading dimension, giving output_lens = (3,2,7,5)
if(s0.size() > s1.size())
{
s0.swap(s1);
}
std::vector<std::size_t> out_lens(s1);
auto offset = s1.size() - s0.size();
std::transform(
s0.begin(), s0.end(), s1.begin() + offset, out_lens.begin() + offset, [&](auto a, auto b) {
if(a != b and a != 1 and b != 1)
{
MIGRAPHX_THROW("COMPUTE_BROADCASTLEN: shape {" + to_string_range(s0) + "} and {" +
to_string_range(s1) + "} mismatch!");
}
return std::max(a, b);
});
return out_lens;
}
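This helper is removed from the TF parser here; the parser now reaches the shared broadcasting logic through add_common_op (note the new common.hpp include above). A standalone sketch of the NumPy-style rule it implements, runnable on its own and reproducing the two examples from its comment:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Align the shapes at their trailing dimensions, require each pair of
// lengths to match or be 1, and take the maximum of each pair.
// (Toy code, not the MIGraphX API.)
std::vector<std::size_t> broadcast_lens(std::vector<std::size_t> s0,
                                        std::vector<std::size_t> s1)
{
    if(s0.size() > s1.size())
        s0.swap(s1);
    std::vector<std::size_t> out(s1);
    auto offset = s1.size() - s0.size();
    std::transform(s0.begin(), s0.end(), s1.begin() + offset, out.begin() + offset,
                   [](std::size_t a, std::size_t b) {
                       if(a != b and a != 1 and b != 1)
                           throw std::runtime_error("shapes do not broadcast");
                       return std::max(a, b);
                   });
    return out;
}

int main()
{
    for(auto d : broadcast_lens({3, 2, 4, 5}, {2, 1, 1}))
        std::cout << d << ' '; // 3 2 4 5
    std::cout << '\n';
    for(auto d : broadcast_lens({3, 2, 1, 5}, {2, 7, 5}))
        std::cout << d << ' '; // 3 2 7 5
    std::cout << '\n';
}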
instruction_ref tf_parser::node_info::add_broadcastable_binary_op(const std::string& op_name,
instruction_ref arg0,
instruction_ref arg1) const
{
if(arg0->get_shape().lens() != arg1->get_shape().lens())
{
// Get lengths for both arguments
auto s0 = arg0->get_shape().lens();
auto s1 = arg1->get_shape().lens();
auto out_lens = compute_broadcasted_lens(s0, s1);
auto l0 = arg0;
if(arg0->get_shape().lens() != out_lens)
l0 = add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}), arg0);
auto l1 = arg1;
if(arg1->get_shape().lens() != out_lens)
l1 = add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}), arg1);
return add_instruction(make_op(op_name), l0, l1);
}
else
{
return add_instruction(make_op(op_name), {arg0, arg1});
}
return add_common_op(*mm, make_op(op_name), {arg0, arg1});
}
int64_t tf_parser::parse_axis(const int64_t dim, const size_t num_dims) const
......
......@@ -131,4 +131,32 @@ TEST_CASE(non_standard_return_input)
EXPECT(std::distance(m.begin(), m.end()) == count);
}
TEST_CASE(non_standard_flatten_op)
{
migraphx::module m;
auto l = m.add_parameter("x", {migraphx::shape::float_type, {2, 6, 6, 6}});
auto t = m.add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), l);
auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
m.add_instruction(migraphx::make_op("flatten"), c);
auto count = std::distance(m.begin(), m.end());
run_pass(m);
EXPECT(std::distance(m.begin(), m.end()) == count);
}
TEST_CASE(standard_flatten_op)
{
migraphx::module m;
auto l = m.add_parameter("x", {migraphx::shape::float_type, {2, 6, 6, 6}});
auto t = m.add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), l);
auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
m.add_instruction(migraphx::make_op("flatten"), c);
auto count = std::distance(m.begin(), m.end());
run_pass(m);
EXPECT(std::distance(m.begin(), m.end()) == (count - 1));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -6,6 +6,7 @@
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/compile_hip_code_object.hpp>
......@@ -74,19 +75,10 @@ migraphx::src_file make_src_file(const std::string& name, const std::string& con
return {name, std::make_pair(content.data(), content.data() + content.size())};
}
std::string get_device_name()
{
hipDeviceProp_t props{};
int device;
EXPECT(hipGetDevice(&device) == hipSuccess);
EXPECT(hipGetDeviceProperties(&props, device) == hipSuccess);
return "gfx" + std::to_string(props.gcnArch);
}
TEST_CASE(simple_compile_hip)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("main.cpp", write_2s)}, "", get_device_name());
{make_src_file("main.cpp", write_2s)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::argument input{{migraphx::shape::int8_type, {5}}};
......@@ -103,7 +95,7 @@ TEST_CASE(simple_compile_hip)
TEST_CASE(code_object_hip)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("main.cpp", add_2s_binary)}, "", get_device_name());
{make_src_file("main.cpp", add_2s_binary)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::shape input{migraphx::shape::int8_type, {5}};
......
......@@ -6,6 +6,7 @@
#include <functional>
#include <iostream>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <vector>
......@@ -83,8 +84,8 @@ struct function
}
};
template <class Iterator>
inline std::ostream& stream_range(std::ostream& s, Iterator start, Iterator last)
template <class Stream, class Iterator>
inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
if(start != last)
{
......@@ -94,22 +95,17 @@ inline std::ostream& stream_range(std::ostream& s, Iterator start, Iterator last
return s;
}
inline std::ostream& operator<<(std::ostream& s, std::nullptr_t)
template <class Stream>
inline Stream& operator<<(Stream& s, std::nullptr_t)
{
s << "nullptr";
return s;
}
template <class T>
inline std::ostream& operator<<(std::ostream& s, const std::vector<T>& v)
{
s << "{ ";
stream_range(s, v.begin(), v.end());
s << "}";
return s;
}
inline std::ostream& operator<<(std::ostream& s, const std::vector<bool>& v)
template <class Stream,
class Range,
class = typename std::enable_if<not std::is_convertible<Range, std::string>{}>::type>
inline auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.begin(), v.end()))
{
s << "{ ";
stream_range(s, v.begin(), v.end());
......
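The rewrite above turns the std::ostream overloads into a single generic range overload: any Stream type works, any type with begin()/end() streams as a range, and enable_if rejects string-like types so character arrays keep the ordinary overload. A standalone sketch of the same pattern (toy code, not test.hpp itself):

#include <iostream>
#include <string>
#include <type_traits>
#include <vector>

// Stream each element of [start, last) separated by spaces.
template <class Stream, class Iterator>
Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
    for(auto it = start; it != last; ++it)
        s << *it << ' ';
    return s;
}

// Generic range printer: enable_if rejects anything convertible to
// std::string, and the decltype return requires begin()/end() to exist.
template <class Stream,
          class Range,
          class = typename std::enable_if<not std::is_convertible<Range, std::string>{}>::type>
auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.begin(), v.end()))
{
    s << "{ ";
    stream_range(s, v.begin(), v.end());
    s << "}";
    return s;
}

int main()
{
    std::vector<int> v = {1, 2, 3};
    std::cout << v << '\n';      // { 1 2 3 }
    std::cout << "text" << '\n'; // string literal keeps the const char* overload
}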
[binary ONNX test artifacts omitted: these hunks are regenerated .onnx protobuf files (including dequantizelinear_test.onnx and dequantizelinear_zero_point_test.onnx) and are not human-readable]
......@@ -4,8 +4,7 @@
import numpy as np
import onnx
from onnx import helper
from onnx import numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
from onnx import TensorProto
def onnx_test(op_test):
......@@ -483,7 +482,6 @@ def constant_fill_test():
@onnx_test
def constant_fill_input_as_shape_test():
np_shape = np.array([2, 3])
shape = helper.make_tensor_value_info('shape', TensorProto.INT32, [2])
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
ts_shape = helper.make_tensor(name='shape_tensor',
......@@ -534,7 +532,6 @@ def constant_scalar_test():
def const_of_shape_empty_input_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
data_type=TensorProto.INT32,
......@@ -1021,6 +1018,21 @@ def deconv_stride_test():
@onnx_test
def dequantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT, [5])
node = onnx.helper.make_node(
'DequantizeLinear',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
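For reference, the ONNX operators these generators emit are defined as

$$\mathrm{DequantizeLinear:}\quad y = (x - x_{\mathrm{zero\_point}}) \cdot x_{\mathrm{scale}}$$

$$\mathrm{QuantizeLinear:}\quad y = \mathrm{saturate}\big(\mathrm{round}(x / y_{\mathrm{scale}}) + y_{\mathrm{zero\_point}}\big)$$

where the saturation bounds come from the output type (0..255 for uint8, -128..127 for int8); these are exactly the clip ranges the rewritten C++ tests later in this diff assert.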
@onnx_test
def dequantizelinear_zero_point_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg2 = helper.make_tensor_value_info('2', TensorProto.INT8, [1])
......@@ -1217,98 +1229,6 @@ def equal_bool_test():
return ([node1, node2], [x1, x2], [y])
@onnx_test
def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Greater',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def greater_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Greater',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def less_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Less',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def less_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Less',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def lessorequal_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'LessOrEqual',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x1, x2], [y])
@onnx_test
def erf_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
......@@ -1376,6 +1296,29 @@ def flatten_test():
return ([node, node2], [x], [y, y2])
@onnx_test
def flatten_nonstd_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 5, 4])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60])
trans = helper.make_node(
'Transpose',
inputs=['0'],
outputs=['tx'],
perm=[0, 1, 3, 2],
)
node = onnx.helper.make_node('Flatten',
inputs=['tx'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['tx'], outputs=['3'])
return ([trans, node, node2], [x], [y, y2])
@onnx_test
def floor_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
......@@ -1491,6 +1434,23 @@ def gemm_ex_brcst_test():
return ([node], [m1, m2, m3], [y])
@onnx_test
def gemm_half_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 1, 8, 6])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [1, 1, 8, 7])
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 1, 6, 7])
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
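Per the ONNX Gemm definition, the generators here exercise

$$Y = \alpha \cdot \mathrm{op}(A)\,\mathrm{op}(B) + \beta \cdot C,\qquad \mathrm{op}(X) = X^{T}\ \text{when transA/transB is set}$$

The updated C++ expectations later in this diff show the parser now folding alpha into a mul on A and beta into a mul on C, so the dot instruction itself runs with alpha = beta = 1.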
@onnx_test
def globalavgpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
......@@ -1519,6 +1479,44 @@ def globalmaxpool_test():
return ([node], [x], [y])
@onnx_test
def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Greater',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def greater_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Greater',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def group_conv_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
......@@ -1595,8 +1593,6 @@ def if_literal_test():
onnx.TensorProto.FLOAT, [5])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT, [5])
empty_out = onnx.helper.make_tensor_value_info('empty_out',
onnx.TensorProto.FLOAT, [])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
......@@ -2216,6 +2212,60 @@ def leaky_relu_test():
return ([node], [x], [y])
@onnx_test
def less_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Less',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x2], [y], [x1])
@onnx_test
def less_bool_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
node2 = onnx.helper.make_node(
'Less',
inputs=['bx1', 'x2'],
outputs=['y'],
)
return ([node1, node2], [x1, x2], [y])
@onnx_test
def lessorequal_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'LessOrEqual',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x1, x2], [y])
@onnx_test
def log_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
......@@ -2279,8 +2329,7 @@ def logsoftmax_test():
@onnx_test
def logsoftmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 9])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node('Slice',
inputs=['0'],
......@@ -2294,7 +2343,7 @@ def logsoftmax_nonstd_input_test():
outputs=['2'],
axis=-1)
return ([node0, node1], [x], [z])
return ([node0, node1], [x], [y])
@onnx_test
......@@ -2751,6 +2800,36 @@ def prelu_brcst_test():
@onnx_test
def quantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg_out = helper.make_tensor_value_info('out', TensorProto.INT8, [5])
node = onnx.helper.make_node(
'QuantizeLinear',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def quantizelinear_int32_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT32, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg_out = helper.make_tensor_value_info('out', TensorProto.INT8, [5])
node = onnx.helper.make_node(
'QuantizeLinear',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def quantizelinear_zero_point_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1])
arg2 = helper.make_tensor_value_info('2', TensorProto.INT8, [1])
......@@ -3145,8 +3224,6 @@ def reshape_test():
@onnx_test
def reshape_non_standard_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
trans_x = helper.make_tensor_value_info('trans_x', TensorProto.FLOAT,
[2, 4, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 3, 2])
trans = helper.make_node(
......@@ -3356,6 +3433,25 @@ def resize_upsample_pc_test():
return ([node], [X], [Y], [scale_tensor])
@onnx_test
def scatter_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
i = helper.make_tensor_value_info('indices', TensorProto.INT32,
[2, 3, 4, 5])
u = helper.make_tensor_value_info('update', TensorProto.FLOAT,
[2, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
node = onnx.helper.make_node(
'Scatter',
inputs=['data', 'indices', 'update'],
outputs=['y'],
axis=-2,
)
return ([node], [x, i, u], [y])
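Per the ONNX Scatter definition, the output starts as a copy of data, and updates is written through indices along the given axis. With axis=-2 on these rank-4 shapes (i.e. axis 2), the effect is roughly

$$\mathrm{out}[i, j, \mathrm{indices}[i,j,k,l], l] = \mathrm{update}[i,j,k,l]$$

with out = data everywhere else.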
@onnx_test
def selu_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [2, 3])
......@@ -3389,7 +3485,6 @@ def shape_gather_test():
values = np.array([1])
# value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [7, 3, 10])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [3])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [1])
value_tensor = helper.make_tensor(name='const_tensor',
......@@ -3696,8 +3791,7 @@ def softmax_test():
@onnx_test
def softmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 8])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node('Slice',
inputs=['0'],
......@@ -3708,7 +3802,7 @@ def softmax_nonstd_input_test():
node1 = onnx.helper.make_node('Softmax', inputs=['1'], outputs=['2'])
return ([node0, node1], [x], [z])
return ([node0, node1], [x], [y])
@onnx_test
......@@ -3811,8 +3905,7 @@ def squeeze_empty_axes_test():
def squeeze_unsqueeze_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, 1, 1, 2, 1])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 2])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT,
y = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, 3, 1, 2, 1])
node = onnx.helper.make_node('Squeeze',
......@@ -3825,7 +3918,7 @@ def squeeze_unsqueeze_test():
axes=[0, 1, 3, 5],
outputs=['2'])
return ([node, node2], [x], [z])
return ([node, node2], [x], [y])
@onnx_test
......
#include <iostream>
#include <fstream>
#include <vector>
#include <migraphx/common.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/rewrite_quantization.hpp>
#include <migraphx/eliminate_identity.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/unknown.hpp>
#include <migraphx/serialize.hpp>
#include "test.hpp"
migraphx::program optimize_onnx(const std::string& name, bool eliminate_deadcode = false)
migraphx::program optimize_onnx(const std::string& name, bool run_passes = false)
{
migraphx::onnx_options options;
options.skip_unknown_operators = true;
auto prog = migraphx::parse_onnx(name, options);
auto* mm = prog.get_main_module();
if(eliminate_deadcode)
migraphx::run_passes(*mm, {migraphx::dead_code_elimination{}});
if(run_passes)
migraphx::run_passes(*mm,
{migraphx::rewrite_quantization{}, migraphx::dead_code_elimination{}});
// remove the last identity instruction
auto last_ins = std::prev(mm->end());
......@@ -914,29 +922,42 @@ TEST_CASE(dequantizelinear_test)
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::int8_type, {5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l2 = mm->add_parameter("2", {migraphx::shape::int8_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l1);
l2 = mm->add_instruction(
auto dequant = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
l2);
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
mm->add_instruction(migraphx::make_op("mul"), dequant, l1_mbcast);
auto prog = optimize_onnx("dequantizelinear_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(dequantizelinear_zero_point_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::int8_type, {5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l2 = mm->add_parameter("2", {migraphx::shape::int8_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l1);
auto l2_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l2);
l0 = mm->add_instruction(
l2_mbcast = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
l0);
auto sub = mm->add_instruction(migraphx::make_op("sub"), l0, l2_mbcast);
auto dequant = mm->add_instruction(
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_mbcast);
l0 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
sub);
l0);
mm->add_instruction(migraphx::make_op("mul"), dequant, l1_mbcast);
auto sub = mm->add_instruction(migraphx::make_op("sub"), l0, l2_mbcast);
mm->add_instruction(migraphx::make_op("mul"), sub, l1_mbcast);
auto prog = optimize_onnx("dequantizelinear_test.onnx");
auto prog = optimize_onnx("dequantizelinear_zero_point_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
......@@ -955,19 +976,15 @@ migraphx::program make_dequantizelinear_axis_prog()
migraphx::make_op("broadcast", {{"axis", axis}, {"dims", input_lens}}), l2);
l2_bcast = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_bcast);
l0 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
l0);
auto sub = mm->add_instruction(migraphx::make_op("sub"), l0, l2_bcast);
auto dequant = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
sub);
l0);
auto sub = mm->add_instruction(migraphx::make_op("sub"), l0, l2_bcast);
mm->add_instruction(migraphx::make_op("mul"), dequant, l1_bcast);
mm->add_instruction(migraphx::make_op("mul"), sub, l1_bcast);
return p;
}
......@@ -975,7 +992,7 @@ TEST_CASE(dequantizelinear_axis_test)
{
migraphx::program p = make_dequantizelinear_axis_prog();
auto prog = optimize_onnx("dequantizelinear_axis_test.onnx");
auto prog = optimize_onnx("dequantizelinear_axis_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
......@@ -983,7 +1000,7 @@ TEST_CASE(dequantizelinear_neg_axis_test)
{
migraphx::program p = make_dequantizelinear_axis_prog();
auto prog = optimize_onnx("dequantizelinear_neg_axis_test.onnx");
auto prog = optimize_onnx("dequantizelinear_neg_axis_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
......@@ -1166,6 +1183,21 @@ TEST_CASE(flatten_test)
EXPECT(p == prog);
}
TEST_CASE(flatten_nonstd_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 5, 4}});
auto l1 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l0);
auto l2 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
mm->add_instruction(migraphx::make_op("flatten", {{"axis", 2}}), l2);
auto l3 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
mm->add_instruction(migraphx::make_op("flatten", {{"axis", 1}}), l3);
auto prog = optimize_onnx("flatten_nonstd_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(floor_test)
{
migraphx::program p;
......@@ -1252,19 +1284,26 @@ TEST_CASE(gather_elements_axis1_test)
TEST_CASE(gemm_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {5, 7}});
auto l1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {11, 5}});
auto l2 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type});
auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {1, 0}}}), l0);
auto t1 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {1, 0}}}), l1);
auto bl2 =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {7, 11}}}), l2);
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {5, 7}});
auto l1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {11, 5}});
auto l2 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type});
auto alpha = 2.f;
auto beta = 2.0f;
mm->add_instruction(migraphx::make_op("dot", {{"alpha", alpha}, {"beta", beta}}), t0, t1, bl2);
auto prog = optimize_onnx("gemm_test.onnx");
auto a_l = mm->add_literal(alpha);
auto t_a = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
t_a = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {1, 0}}}), t_a);
auto t1 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {1, 0}}}), l1);
auto b_l = mm->add_literal(beta);
auto l2_b =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {7, 11}}}), l2);
auto b_b = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", l2_b->get_shape().lens()}}), b_l);
auto l2_bb = mm->add_instruction(migraphx::make_op("mul"), l2_b, b_b);
mm->add_instruction(
migraphx::make_op("dot", {{"alpha", 1.0f}, {"beta", 1.0f}}), t_a, t1, l2_bb);
auto prog = optimize_onnx("gemm_test.onnx");
EXPECT(p == prog);
}
......@@ -1275,10 +1314,18 @@ TEST_CASE(gemm_ex_test)
auto l0 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 8, 6}});
auto l1 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 1, 8, 7}});
auto l2 = mm->add_parameter("3", migraphx::shape{migraphx::shape::float_type, {1, 1, 6, 7}});
auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l0);
auto alpha = 0.5f;
auto beta = 0.8f;
mm->add_instruction(migraphx::make_op("dot", {{"alpha", alpha}, {"beta", beta}}), t0, l1, l2);
auto a_l = mm->add_literal(alpha);
auto t_a = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
t_a = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), t_a);
auto b_l = mm->add_literal(beta);
auto b_b = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", l2->get_shape().lens()}}), b_l);
auto l2_b = mm->add_instruction(migraphx::make_op("mul"), l2, b_b);
mm->add_instruction(migraphx::make_op("dot", {{"alpha", 1.0f}, {"beta", 1.0f}}), t_a, l1, l2_b);
auto prog = optimize_onnx("gemm_ex_test.onnx");
EXPECT(p == prog);
......@@ -1291,18 +1338,58 @@ TEST_CASE(gemm_ex_brcst_test)
auto l0 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 6}});
auto l1 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 7}});
auto l2 = mm->add_parameter("3", migraphx::shape{migraphx::shape::float_type, {1, 1, 6, 1}});
auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l0);
std::vector<std::size_t> out_lens{1, 1, 6, 7};
auto t2 =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", out_lens}}), l2);
auto alpha = 0.5f;
auto beta = 0.8f;
mm->add_instruction(migraphx::make_op("dot", {{"alpha", alpha}, {"beta", beta}}), t0, l1, t2);
auto a_l = mm->add_literal(alpha);
auto t_a = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
t_a = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), t_a);
auto b_l = mm->add_literal(beta);
auto l2_b =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", out_lens}}), l2);
auto b_b = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", l2_b->get_shape().lens()}}), b_l);
auto l2_bb = mm->add_instruction(migraphx::make_op("mul"), l2_b, b_b);
mm->add_instruction(
migraphx::make_op("dot", {{"alpha", 1.0f}, {"beta", 1.0f}}), t_a, l1, l2_bb);
auto prog = optimize_onnx("gemm_ex_brcst_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gemm_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("1", migraphx::shape{migraphx::shape::half_type, {1, 1, 8, 6}});
auto l1 = mm->add_parameter("2", migraphx::shape{migraphx::shape::half_type, {1, 1, 8, 7}});
auto l2 = mm->add_parameter("3", migraphx::shape{migraphx::shape::half_type, {1, 1, 6, 1}});
auto alpha = 0.5f;
auto beta = 0.8f;
auto a_l = mm->add_literal(alpha);
auto t_a = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
t_a = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), t_a);
t_a = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), t_a);
std::vector<std::size_t> lens = {1, 1, 6, 7};
l2 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", lens}}), l2);
l2 = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l2);
auto b_l = mm->add_literal(beta);
auto b_b =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", lens}}), b_l);
auto l2_b = mm->add_instruction(migraphx::make_op("mul"), l2, b_b);
l2_b = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), l2_b);
mm->add_instruction(migraphx::make_op("dot", {{"alpha", 1.0f}, {"beta", 1.0f}}), t_a, l1, l2_b);
auto prog = optimize_onnx("gemm_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(globalavgpool_test)
{
migraphx::program p;
......@@ -2378,41 +2465,87 @@ TEST_CASE(prelu_brcst_test)
TEST_CASE(quantizelinear_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l2 = mm->add_parameter("2", {migraphx::shape::int8_type, {1}});
auto min_val = mm->add_literal(-128);
auto max_val = mm->add_literal(127);
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), 0);
std::vector<int> max_data(s.elements(), 255);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_arg, max_arg);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
clip);
auto prog = optimize_onnx("quantizelinear_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(quantizelinear_int32_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::int32_type, {5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l1);
l0 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
l2 = mm->add_instruction(
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), 0);
std::vector<int> max_data(s.elements(), 255);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_arg, max_arg);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
l2);
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
clip);
auto prog = optimize_onnx("quantizelinear_int32_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(quantizelinear_zero_point_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l2 = mm->add_parameter("2", {migraphx::shape::int8_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto l2_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), l2);
round = mm->add_instruction(
l2_mbcast = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
round);
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_mbcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
min_val =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), min_val);
max_val =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), max_val);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_val, max_val);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), -128);
std::vector<int> max_data(s.elements(), 127);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_arg, max_arg);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
clip);
auto prog = optimize_onnx("quantizelinear_test.onnx");
auto prog = optimize_onnx("quantizelinear_zero_point_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
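As a cross-check of the IR the test above expects, a scalar sketch of the same computation (quantize_ref is a hypothetical name introduced for illustration; int8 bounds assumed, no per-axis handling):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Scalar reference for the expected decomposition: divide by scale, round,
// add the zero point, clip to the int8 range, then narrow the type.
std::int8_t quantize_ref(float x, float scale, std::int8_t zero_point)
{
    float q = std::round(x / scale) + static_cast<float>(zero_point);
    q = std::min(std::max(q, -128.0f), 127.0f);
    return static_cast<std::int8_t>(q);
}

int main()
{
    // 3.1 / 0.05 rounds to 62; adding the zero point 10 gives 72
    std::cout << static_cast<int>(quantize_ref(3.1f, 0.05f, 10)) << "\n"; // 72
}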
......@@ -2423,12 +2556,9 @@ migraphx::program make_quantizelinear_axis_prog()
int axis = 2;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, input_lens});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {5}});
auto l2 = mm->add_parameter("2", {migraphx::shape::int8_type, {5}});
auto min_val = mm->add_literal(-128);
auto max_val = mm->add_literal(127);
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, input_lens});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {5}});
auto l2 = mm->add_parameter("2", {migraphx::shape::int8_type, {5}});
auto l1_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", axis}, {"dims", input_lens}}), l1);
......@@ -2438,18 +2568,15 @@ migraphx::program make_quantizelinear_axis_prog()
migraphx::make_op("broadcast", {{"axis", axis}, {"dims", input_lens}}), l2);
l2_bcast = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_bcast);
round = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
round);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
min_val = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", {1, 1, 5, 1}}}), min_val);
max_val = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", {1, 1, 5, 1}}}), max_val);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_val, max_val);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), -128);
std::vector<int> max_data(s.elements(), 127);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_arg, max_arg);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
......@@ -2461,7 +2588,7 @@ TEST_CASE(quantizelinear_axis_test)
{
migraphx::program p = make_quantizelinear_axis_prog();
auto prog = optimize_onnx("quantizelinear_axis_test.onnx");
auto prog = optimize_onnx("quantizelinear_axis_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
......@@ -2469,7 +2596,7 @@ TEST_CASE(quantizelinear_neg_axis_test)
{
migraphx::program p = make_quantizelinear_axis_prog();
auto prog = optimize_onnx("quantizelinear_neg_axis_test.onnx");
auto prog = optimize_onnx("quantizelinear_neg_axis_test.onnx", true);
EXPECT(p.sort() == prog.sort());
}
......@@ -3162,6 +3289,23 @@ TEST_CASE(round_test)
EXPECT(p == prog);
}
TEST_CASE(scatter_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 =
mm->add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 3, 4, 5}});
auto l2 =
mm->add_parameter("update", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
int axis = -2;
auto r = mm->add_instruction(migraphx::make_op("scatter", {{"axis", axis}}), l0, l1, l2);
mm->add_return({r});
auto prog = migraphx::parse_onnx("scatter_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(selu_test)
{
migraphx::program p;
......