Unverified commit c4cee345 authored by Umang Yadav, committed by GitHub

Merge branch 'develop' into rocblas_fp8

parents c40a39c3 eafd55de
@@ -139,7 +139,8 @@ const std::string math_template = R"__migraphx__(
 #include <migraphx/kernels/pointwise.hpp>
 #include <migraphx/kernels/math.hpp>
 #include <migraphx/kernels/types.hpp>
-using namespace migraphx;
+namespace migraphx {
 extern "C" {
 __global__ void kernel(${type}* p)
 {
@@ -148,6 +149,7 @@ __global__ void kernel(${type}* p)
 }
 }
+}
 int main() {}
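(The generated kernel source previously pulled everything in with a file-level using namespace migraphx; it is now wrapped in an explicit namespace migraphx { ... } block instead, presumably so the template's helpers resolve without injecting the whole namespace into the global scope of the generated translation unit.)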
@@ -348,15 +350,13 @@ TEST_CASE(compile_math)
     auto vec_sizes = {2, 4, 6};
     for(auto&& t : migraphx::shape::types())
     {
-        if(contains({migraphx::shape::bool_type,
-                     migraphx::shape::fp8e4m3fnuz_type,
-                     migraphx::shape::tuple_type},
-                    t))
+        if(contains({migraphx::shape::bool_type, migraphx::shape::tuple_type}, t))
             continue;
         auto name = migraphx::shape::cpp_type(t);
         if(t == migraphx::shape::half_type)
             name.insert(0, "migraphx::");
         data_types.push_back(name);
+        // fp8 doesn't have vectorization support yet, therefore skip it for now.
         if(t != migraphx::shape::fp8e4m3fnuz_type)
         {
             migraphx::transform(vec_sizes, std::back_inserter(data_types), [&](auto i) {
...

@@ -24,6 +24,7 @@
 #include <atomic>
 #include <algorithm>
+#include <array>
 #include <cassert>
 #include <cstdio>
 #include <cstdlib>

...
@@ -46,8 +46,12 @@ std::function<F>
 compile_function(const std::string& src, const std::string& flags, const std::string& fname)
 {
     migraphx::src_compiler compiler;
     compiler.flags = flags + "-std=c++14 -fPIC -shared";
+#ifdef _WIN32
+    compiler.output = "simple.dll";
+#else
     compiler.output = "libsimple.so";
+#endif
     migraphx::src_file f{"main.cpp", src};
     auto image = compiler.compile({f});
     return migraphx::dynamic_loader{image}.get_function<F>(fname);
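A minimal usage sketch of this helper (my own illustration, not part of the commit; it assumes compile_function is instantiated with the callable's signature, as the get_function<F> call above suggests, and passes " " for flags since they are prepended directly to "-std=..."):

    // Compile a tiny translation unit to a shared object, load it, call it.
    auto add_one = compile_function<int(int)>(
        R"(extern "C" int add_one(int x) { return x + 1; })", " ", "add_one");
    EXPECT(add_one(41) == 42);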
...

-4a8203033930da506b356cdaf88b1531d8d8fca3
+a5537f2f563d4975c7e6121a7eb260bbbfd9455a
averagepool_dilate_test.onnx (new binary file): an ONNX protobuf containing a single AveragePool node with dilations, kernel_shape, pads, and strides attributes, input x, output y. Binary content not reproduced here.
@@ -276,6 +276,22 @@ def averagepool_1d_test():
     return ([node], [x], [out])

+
+@onnx_test()
+def averagepool_dilate_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 2])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2],
+                                 strides=[1],
+                                 pads=[1, 1],
+                                 dilations=[3])
+    return ([node], [x], [y])
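(Shape sanity check for the two dilate tests: with dilation d the effective kernel extent is d*(k-1)+1 = 3*(2-1)+1 = 4, so with pads=[1, 1] and stride 1 the output length is (3 + 1 + 1 - 4)/1 + 1 = 2, matching the declared y shape [1, 4, 2] here and in maxpool_dilate_test below.)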
 @onnx_test()
 def averagepool_3d_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
@@ -4882,6 +4898,22 @@ def maxpool_notset_test():
     return ([node], [x], [y])

+
+@onnx_test()
+def maxpool_dilate_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 2])
+    node = onnx.helper.make_node('MaxPool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2],
+                                 strides=[1],
+                                 pads=[1, 1],
+                                 dilations=[3])
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def maxpool_same_upper_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
@@ -5962,6 +5994,263 @@ def qlinearadd_bcast_test():
                 [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])

+
+@onnx_test()
+def qlinearaveragepool_1d_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 32])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 31])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [16])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[2],
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 3, 3])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.015])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [16])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[2, 2],
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_ceil_test():
+    x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 1, 4, 4])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 1, 2, 2])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [], [0])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[3, 3],
+        strides=[2, 2],
+        ceil_mode=True,
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_dilations_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 1, 4, 4])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 1, 2, 2])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.25])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [84])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[2, 2],
+        strides=[1, 1],
+        dilations=[2, 2],
+        ceil_mode=True,
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_pads_count_include_pad_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 6, 6])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.01])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [32])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[3, 3],
+        pads=[2, 2, 2, 2],
+        count_include_pad=1,
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_same_lower_test():
+    x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 3, 4, 4])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 3, 4, 4])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [], [0])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[2, 2],
+        auto_pad="SAME_LOWER",
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_same_upper_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [32])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 4, 4])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.25])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [0])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[2, 2],
+        auto_pad="SAME_UPPER",
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_2d_strides_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 8, 8])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 2, 2])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [8])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[5, 5],
+        strides=[2, 2],
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_3d_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 3, 3, 3])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 2, 2, 2])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.02])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [0])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[2, 2, 2],
+    )
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_notset_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 1, 5, 5])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 1, 1, 1])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [], [10])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[6, 6],
+        strides=[2, 2],
+        pads=[0, 0, 1, 1],
+        channels_last=0,
+        auto_pad='NOTSET')
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
+
+
+@onnx_test()
+def qlinearaveragepool_nt_cip_test():
+    x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 1, 5, 5])
+    x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
+    x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [], [0])
+    y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 1, 1, 1])
+    y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
+    y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [], [10])
+    node = onnx.helper.make_node(
+        'QLinearAveragePool',
+        inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
+        outputs=['y'],
+        kernel_shape=[6, 6],
+        strides=[2, 2],
+        pads=[0, 0, 1, 1],
+        channels_last=0,
+        auto_pad='NOTSET',
+        count_include_pad=1)
+    return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
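(All of these QLinearAveragePool variants should parse to the same dequantizelinear -> pooling -> quantizelinear decomposition; the qlinearaveragepool_notset_test expectation in onnx_test.cpp further down in this commit builds that graph explicitly.)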
 @onnx_test()
 def qlinearconv_test():
     # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
@@ -6094,6 +6383,26 @@ def qlinearglobalavgpool_test():
     return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])

+
+@onnx_test()
+def qlinearleakyrelu_test():
+    x = helper.make_tensor_value_info('X', TensorProto.INT8, [64])
+    sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_x = helper.make_tensor('X_zero_point', TensorProto.INT8, [], [0])
+    sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_y = helper.make_tensor('Y_zero_point', TensorProto.INT8, [], [10])
+    y = helper.make_tensor_value_info('Y', TensorProto.INT8, [64])
+    node = onnx.helper.make_node(
+        'QLinearLeakyRelu',
+        inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
+        outputs=['Y'],
+        alpha=1.1,
+    )
+    return ([node], [x], [y], [sc_x, zero_pt_x, sc_y, zero_pt_y])
+
+
+@onnx_test()
 def qlinearmatmul_1D_test():
     a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
     sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
@@ -6234,6 +6543,26 @@ def qlinearmul_bcast_test():
                 [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])

+
+@onnx_test()
+def qlinearsigmoid_test():
+    x = helper.make_tensor_value_info('X', TensorProto.INT8, [64])
+    sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_x = helper.make_tensor('X_zero_point', TensorProto.INT8, [], [0])
+    sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.0035])
+    zero_pt_y = helper.make_tensor('Y_zero_point', TensorProto.INT8, [], [-128])
+    y = helper.make_tensor_value_info('Y', TensorProto.INT8, [64])
+    node = onnx.helper.make_node(
+        'QLinearSigmoid',
+        inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
+        outputs=['Y'],
+    )
+    return ([node], [x], [y], [sc_x, zero_pt_x, sc_y, zero_pt_y])
+
+
 @onnx_test()
 def quantizelinear_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
@@ -7383,8 +7712,7 @@ def scatter_none_test():
     return ([node], [x, i, u], [y])

-@onnx_test()
-def scatternd_add_test():
+def make_scatternd_test(reduction="none"):
     data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
     indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
                                             [2, 1, 2])
@@ -7396,44 +7724,39 @@ def scatternd_add_test():
     node = onnx.helper.make_node('ScatterND',
                                  inputs=['data', 'indices', 'updates'],
                                  outputs=['output'],
-                                 reduction="add")
+                                 reduction=reduction)
     return ([node], [data, indices, updates], [output])

 @onnx_test()
+def scatternd_add_test():
+    return make_scatternd_test("add")
+
+
+@onnx_test()
 def scatternd_mul_test():
-    data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
-    indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
-                                            [2, 1, 2])
-    updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
-                                            [2, 1, 2])
-    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
-                                           [2, 2, 2])
-    node = onnx.helper.make_node('ScatterND',
-                                 inputs=['data', 'indices', 'updates'],
-                                 outputs=['output'],
-                                 reduction="mul")
-    return ([node], [data, indices, updates], [output])
+    return make_scatternd_test("mul")
+
+
+@onnx_test()
+def scatternd_max_test():
+    return make_scatternd_test("max")
+
+
+@onnx_test()
+def scatternd_min_test():
+    return make_scatternd_test("min")

 @onnx_test()
 def scatternd_test():
-    data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
-    indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
-                                            [2, 1, 2])
-    updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
-                                            [2, 1, 2])
-    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
-                                           [2, 2, 2])
-    node = onnx.helper.make_node('ScatterND',
-                                 inputs=['data', 'indices', 'updates'],
-                                 outputs=['output'])
-    return ([node], [data, indices, updates], [output])
+    return make_scatternd_test()
+
+
+@onnx_test()
+def scatternd_invalid_reduction_test():
+    return make_scatternd_test("invalid")
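(Only the reduction attribute differs across the ScatterND models, so make_scatternd_test builds the shared graph once and each @onnx_test wrapper simply forwards the attribute; the "invalid" variant exists to exercise the parser's error path, checked by scatternd_invalid_reduction_test in onnx_test.cpp below.)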
 @onnx_test()
@@ -9220,6 +9543,97 @@ def undefined_test():
     return ([node], [x], [y])
+
+@onnx_test()
+def unique_dynamic_sorted_test():
+    x = helper.make_tensor_value_info('X', TensorProto.FLOAT, [6])
+    y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [4])
+    y_ind = helper.make_tensor_value_info('indices', TensorProto.INT64, [4])
+    x_ind = helper.make_tensor_value_info('inverse_indices', TensorProto.INT64, [6])
+    count = helper.make_tensor_value_info('counts', TensorProto.INT64, [4])
+    node = onnx.helper.make_node(
+        'Unique',
+        inputs=['X'],
+        outputs=['Y', 'indices', 'inverse_indices', 'counts'],
+        axis=0,
+        sorted=1)
+    return ([node], [x], [y, y_ind, x_ind, count])
+
+
+@onnx_test()
+def unique_dynamic_sorted_3D_test():
+    x = helper.make_tensor_value_info('X', TensorProto.INT64, [4, 4, 4])
+    y = helper.make_tensor_value_info('Y', TensorProto.INT64, [16])
+    y_ind = helper.make_tensor_value_info('indices', TensorProto.INT64, [16])
+    x_ind = helper.make_tensor_value_info('inverse_indices', TensorProto.INT64, [64])
+    count = helper.make_tensor_value_info('counts', TensorProto.INT64, [16])
+    node = onnx.helper.make_node(
+        'Unique',
+        inputs=['X'],
+        outputs=['Y', 'indices', 'inverse_indices', 'counts'],
+        sorted=1)
+    return ([node], [x], [y, y_ind, x_ind, count])
+
+
+@onnx_test()
+def unique_dynamic_unsorted_test():
+    x = helper.make_tensor_value_info('X', TensorProto.FLOAT, [6])
+    y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [4])
+    y_ind = helper.make_tensor_value_info('indices', TensorProto.INT64, [4])
+    x_ind = helper.make_tensor_value_info('inverse_indices', TensorProto.INT64, [6])
+    count = helper.make_tensor_value_info('counts', TensorProto.INT64, [4])
+    node = onnx.helper.make_node(
+        'Unique',
+        inputs=['X'],
+        outputs=['Y', 'indices', 'inverse_indices', 'counts'],
+        axis=0,
+        sorted=0)
+    return ([node], [x], [y, y_ind, x_ind, count])
+
+
+@onnx_test()
+def unique_sorted_test():
+    x = helper.make_tensor('X', TensorProto.FLOAT, [6], [2, 1, 1, 3, 4, 3])
+    y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [4])
+    y_ind = helper.make_tensor_value_info('indices', TensorProto.INT64, [4])
+    x_ind = helper.make_tensor_value_info('inverse_indices', TensorProto.INT64, [6])
+    count = helper.make_tensor_value_info('counts', TensorProto.INT64, [4])
+    node = onnx.helper.make_node(
+        'Unique',
+        inputs=['X'],
+        outputs=['Y', 'indices', 'inverse_indices', 'counts'],
+        axis=0,
+        sorted=1)
+    return ([node], [], [y, y_ind, x_ind, count], [x])
+
+
+@onnx_test()
+def unique_unsorted_test():
+    x = helper.make_tensor('X', TensorProto.FLOAT, [6], [2, 1, 1, 3, 4, 3])
+    y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [4])
+    y_ind = helper.make_tensor_value_info('indices', TensorProto.INT64, [4])
+    x_ind = helper.make_tensor_value_info('inverse_indices', TensorProto.INT64, [6])
+    count = helper.make_tensor_value_info('counts', TensorProto.INT64, [4])
+    node = onnx.helper.make_node(
+        'Unique',
+        inputs=['X'],
+        outputs=['Y', 'indices', 'inverse_indices', 'counts'],
+        axis=0,
+        sorted=0)
+    return ([node], [], [y, y_ind, x_ind, count], [x])
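(ONNX Unique has up to four outputs: the unique values Y, the first-occurrence indices, the inverse indices mapping every input element back to its slot in Y, and per-value counts. For the X = [2, 1, 1, 3, 4, 3] initializer used by unique_sorted_test, the sorted results would be Y = [1, 2, 3, 4], indices = [1, 0, 3, 4], inverse_indices = [1, 0, 0, 2, 3, 2], and counts = [2, 1, 2, 1], consistent with the declared output lengths 4, 4, 6, and 4.)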
 @onnx_test()
 def unknown_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])

...
maxpool_dilate_test.onnx (new binary file): an ONNX protobuf containing a single MaxPool node with dilations, kernel_shape, pads, and strides attributes, input x, output y. Binary content not reproduced here.
@@ -296,13 +296,32 @@ TEST_CASE(averagepool_1d_test)
                         {{"mode", migraphx::op::pooling_mode::average},
                          {"padding", {0, 0}},
                          {"stride", {1}},
-                         {"lengths", {3}}}),
+                         {"lengths", {3}},
+                         {"dilations", {1}}}),
                         l0);
     auto prog = optimize_onnx("averagepool_1d_test.onnx");
     EXPECT(p == prog);
 }

+TEST_CASE(averagepool_dilate_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::average},
+                                           {"padding", {1, 1}},
+                                           {"stride", {1}},
+                                           {"lengths", {2}},
+                                           {"dilations", {3}}}),
+                        input);
+    auto prog = optimize_onnx("averagepool_dilate_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(averagepool_3d_test)
 {
     migraphx::program p;
@@ -312,7 +331,8 @@ TEST_CASE(averagepool_3d_test)
                         {{"mode", migraphx::op::pooling_mode::average},
                          {"padding", {0, 0, 0, 0, 0, 0}},
                          {"stride", {1, 1, 1}},
-                         {"lengths", {3, 3, 3}}}),
+                         {"lengths", {3, 3, 3}},
+                         {"dilations", {1, 1, 1}}}),
                         l0);
     auto prog = optimize_onnx("averagepool_3d_test.onnx");
@@ -332,6 +352,7 @@ TEST_CASE(averagepool_dyn_test)
                             {"mode", migraphx::op::pooling_mode::average},
                             {"stride", {2, 2, 2}},
                             {"lengths", {3, 3, 3}},
+                            {"dilations", {1, 1, 1}},
                             {"padding", {1, 1, 1, 1, 1, 1}},
                             {"padding_mode", 0},
                         }),
@@ -357,6 +378,7 @@ TEST_CASE(averagepool_dyn_autopad_test)
                             {"mode", migraphx::op::pooling_mode::average},
                             {"stride", {2, 2, 2}},
                             {"lengths", {3, 3, 3}},
+                            {"dilations", {1, 1, 1}},
                             {"padding", {0, 0, 0, 0, 0, 0}},
                             {"padding_mode", migraphx::op::padding_mode_t::same_upper},
                         }),
@@ -394,7 +416,8 @@ TEST_CASE(averagepool_notset_test)
                         {{"mode", migraphx::op::pooling_mode::average},
                          {"padding", {2, 2, 2, 2}},
                          {"stride", {2, 2}},
-                         {"lengths", {6, 6}}}),
+                         {"lengths", {6, 6}},
+                         {"dilations", {1, 1}}}),
                         input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
@@ -415,7 +438,8 @@ TEST_CASE(averagepool_nt_cip_test)
                         {{"mode", migraphx::op::pooling_mode::average},
                          {"padding", {0, 0, 0, 0}},
                          {"stride", {2, 2}},
-                         {"lengths", {6, 6}}}),
+                         {"lengths", {6, 6}},
+                         {"dilations", {1, 1}}}),
                         ins_pad);
     mm->add_return({ret});
@@ -437,6 +461,7 @@ TEST_CASE(averagepool_same_lower_test)
                             {"padding", {1, 1, 1, 1}},
                             {"stride", {1, 1}},
                             {"lengths", {2, 2}},
+                            {"dilations", {1, 1}},
                             {"padding_mode", migraphx::op::padding_mode_t::default_},
                         }),
                         input);
@@ -459,7 +484,8 @@ TEST_CASE(averagepool_sl_cip_test)
                         {{"mode", migraphx::op::pooling_mode::average},
                          {"padding", {0, 0, 0, 0}},
                          {"stride", {1, 1}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         ins_pad);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
@@ -476,7 +502,8 @@ TEST_CASE(averagepool_same_upper_test)
                         {{"mode", migraphx::op::pooling_mode::average},
                          {"padding", {1, 1, 1, 1}},
                          {"stride", {1, 1}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
@@ -1307,7 +1334,8 @@ TEST_CASE(conv_bn_relu_maxpool_test)
                         {{"mode", migraphx::op::pooling_mode::max},
                          {"padding", {0, 0, 0, 0}},
                          {"stride", {2, 2}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         l7);
     auto prog = optimize_onnx("conv_bn_relu_maxpool_test.onnx");
@@ -1505,7 +1533,8 @@ TEST_CASE(conv_relu_maxpool_test)
                         {{"mode", migraphx::op::pooling_mode::max},
                          {"padding", {0, 0, 0, 0}},
                          {"stride", {2, 2}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         l6);
     auto prog = optimize_onnx("conv_relu_maxpool_test.onnx");
@@ -1530,7 +1559,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
                         {{"mode", migraphx::op::pooling_mode::max},
                          {"padding", {0, 0, 0, 0}},
                          {"stride", {2, 2}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         l6);
     auto l8 = mm->add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
@@ -1546,7 +1576,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
                         {{"mode", migraphx::op::pooling_mode::max},
                          {"padding", {0, 0, 0, 0}},
                          {"stride", {2, 2}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         l13);
     auto prog = optimize_onnx("conv_relu_maxpool_x2_test.onnx");
@@ -4245,6 +4276,7 @@ TEST_CASE(lppool_l1_test)
                          {"padding", {0, 0}},
                          {"stride", {1}},
                          {"lengths", {3}},
+                         {"dilations", {1}},
                          {"lp_order", 1}}),
                         l0);
     auto prog = optimize_onnx("lppool_l1_test.onnx");
@@ -4261,6 +4293,7 @@ TEST_CASE(lppool_l2_test)
                          {"padding", {0, 0}},
                          {"stride", {1}},
                          {"lengths", {3}},
+                         {"dilations", {1}},
                          {"lp_order", 2}}),
                         l0);
     auto prog = optimize_onnx("lppool_l2_test.onnx");
@@ -4513,7 +4546,8 @@ TEST_CASE(maxpool_notset_test)
                         {{"mode", migraphx::op::pooling_mode::max},
                          {"padding", {0, 0, 1, 1}},
                          {"stride", {2, 2}},
-                         {"lengths", {6, 6}}}),
+                         {"lengths", {6, 6}},
+                         {"dilations", {1, 1}}}),
                         input);
     auto prog = optimize_onnx("maxpool_notset_test.onnx");
@@ -4521,6 +4555,24 @@ TEST_CASE(maxpool_notset_test)
     EXPECT(p == prog);
 }

+TEST_CASE(maxpool_dilate_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::max},
+                                           {"padding", {1, 1}},
+                                           {"stride", {1}},
+                                           {"lengths", {2}},
+                                           {"dilations", {3}}}),
+                        input);
+    auto prog = optimize_onnx("maxpool_dilate_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(maxpool_same_upper_test)
 {
     migraphx::program p;
@@ -4530,7 +4582,8 @@ TEST_CASE(maxpool_same_upper_test)
                         {{"mode", migraphx::op::pooling_mode::max},
                          {"padding", {0, 0, 1, 1}},
                          {"stride", {1, 1}},
-                         {"lengths", {2, 2}}}),
+                         {"lengths", {2, 2}},
+                         {"dilations", {1, 1}}}),
                         input);
     auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
@@ -4773,8 +4826,9 @@ TEST_CASE(multinomial_test)
     migraphx::shape s{migraphx::shape::float_type, {1}};
     std::vector<float> seed_data = {seed};
     auto seed_input = mm->add_literal(migraphx::literal(s, seed_data));
-    auto rand_dummy =
-        mm->add_literal(migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
+    auto rand_dummy = mm->add_literal(
+        migraphx::literal{migraphx::shape{migraphx::shape::float_type, {batch_size, sample_size}},
+                          std::vector<float>(batch_size * sample_size)});
     auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
     mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
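(In both multinomial tests the dummy literal now carries an explicit two-dimensional {batch_size, sample_size} shape with zero-filled data, instead of a one-dimensional shape of batch_size * sample_size elements, presumably so random_uniform is handed the exact output shape it must fill.)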
@@ -4925,8 +4979,9 @@ TEST_CASE(multinomial_int64_test)
     auto seed_input = mm->add_literal(migraphx::literal(s, data));
     // static size
-    auto rand_dummy =
-        mm->add_literal(migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
+    auto rand_dummy = mm->add_literal(
+        migraphx::literal{migraphx::shape{migraphx::shape::float_type, {batch_size, sample_size}},
+                          std::vector<float>(batch_size * sample_size)});
     auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
     mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", dtype}}), cdf, randoms);
     auto prog = optimize_onnx("multinomial_int64_test.onnx");
@@ -5542,6 +5597,54 @@ TEST_CASE(qlinearadd_test)
     EXPECT(p.sort() == prog.sort());
 }
+TEST_CASE(qlinearaveragepool_notset_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    auto sc_x   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
+    auto sc_y   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
+
+    auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::int8_type, {1, 1, 5, 5}});
+
+    auto scale_x_bcast = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), sc_x);
+    auto z_pt_x_bcast = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), z_pt_x);
+    auto fp_x =
+        mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
+
+    auto fp_y =
+        mm->add_instruction(migraphx::make_op("pooling",
+                                              {{"mode", migraphx::op::pooling_mode::average},
+                                               {"padding", {2, 2, 2, 2}},
+                                               {"stride", {2, 2}},
+                                               {"lengths", {6, 6}}}),
+                            fp_x);
+    fp_y = mm->add_instruction(
+        migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), fp_y);
+
+    auto scale_y_bcast = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_y);
+    auto z_pt_y_bcast = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_y);
+    auto y =
+        mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
+
+    mm->add_return({y});
+
+    auto prog = migraphx::parse_onnx("qlinearaveragepool_notset_test.onnx");
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(qlinearconv_test)
 {
     migraphx::program p;
@@ -5642,6 +5745,46 @@ TEST_CASE(qlinearglobalavgpool_test)
     EXPECT(p.sort() == prog.sort());
 }
+TEST_CASE(qlinearleakyrelu_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    auto x      = mm->add_parameter("X", {migraphx::shape::int8_type, {64}});
+    auto sc_x   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
+    auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
+    auto sc_y   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
+    auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
+
+    auto scale_x_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_x);
+    auto z_pt_x_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_x);
+    auto fp_x =
+        mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
+
+    auto fp_y = mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 1.1}}), fp_x);
+
+    auto scale_y_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_y);
+    auto z_pt_y_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_y);
+    auto y =
+        mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
+
+    mm->add_return({y});
+
+    auto prog = migraphx::parse_onnx("qlinearleakyrelu_test.onnx");
+
+    EXPECT(p.sort() == prog.sort());
+}
+
 TEST_CASE(qlinearmatmul_1D_test)
 {
     migraphx::program p;
@@ -5807,6 +5950,46 @@ TEST_CASE(qlinearmul_test)
     EXPECT(p.sort() == prog.sort());
 }
+TEST_CASE(qlinearsigmoid_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    auto x      = mm->add_parameter("X", {migraphx::shape::int8_type, {64}});
+    auto sc_x   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
+    auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
+    auto sc_y   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.0035}});
+    auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {-128}});
+
+    auto scale_x_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_x);
+    auto z_pt_x_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_x);
+    auto fp_x =
+        mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
+
+    auto fp_y = mm->add_instruction(migraphx::make_op("sigmoid"), fp_x);
+
+    auto scale_y_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_y);
+    auto z_pt_y_bcast =
+        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_y);
+    auto y =
+        mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
+
+    mm->add_return({y});
+
+    auto prog = migraphx::parse_onnx("qlinearsigmoid_test.onnx");
+
+    EXPECT(p.sort() == prog.sort());
+}
+
 migraphx::instruction_ref insert_quantizelinear_clip(migraphx::module& m,
                                                      const migraphx::instruction_ref ins,
                                                      const migraphx::instruction_ref round,
@@ -7094,20 +7277,35 @@ TEST_CASE(scatter_none_test)
     EXPECT(p == prog);
 }
-TEST_CASE(scatternd_test)
+void scatternd_test_base(const std::string& reduction, const std::string& onnx_file)
 {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
     auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
     auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
-    auto r  = mm->add_instruction(migraphx::make_op("scatternd_none"), l0, l1, l2);
+    auto r  = mm->add_instruction(migraphx::make_op("scatternd_" + reduction), l0, l1, l2);
     mm->add_return({r});
-    auto prog = migraphx::parse_onnx("scatternd_test.onnx");
+    auto prog = migraphx::parse_onnx(onnx_file);
     EXPECT(p == prog);
 }

+TEST_CASE(scatternd_test) { scatternd_test_base("none", "scatternd_test.onnx"); }
+
+TEST_CASE(scatternd_add_test) { scatternd_test_base("add", "scatternd_add_test.onnx"); }
+
+TEST_CASE(scatternd_mul_test) { scatternd_test_base("mul", "scatternd_mul_test.onnx"); }
+
+TEST_CASE(scatternd_max_test) { scatternd_test_base("max", "scatternd_max_test.onnx"); }
+
+TEST_CASE(scatternd_min_test) { scatternd_test_base("min", "scatternd_min_test.onnx"); }
+
+TEST_CASE(scatternd_invalid_reduction_test)
+{
+    EXPECT(test::throws([&] { migraphx::parse_onnx("scatternd_invalid_reduction_test.onnx"); }));
+}
+
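(Judging from the expected graphs above, the parser maps a reduction value r to a scatternd_<r> operator, so an unrecognized value such as "invalid" cannot resolve to a registered operator and parse_onnx is expected to throw, which is what test::throws asserts.)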
 TEST_CASE(scatternd_dyn_test)
 {
     // dynamic input.
@@ -7131,34 +7329,6 @@ TEST_CASE(scatternd_dyn_test)
     EXPECT(p == prog);
 }
-TEST_CASE(scatternd_add_test)
-{
-    migraphx::program p;
-    auto* mm = p.get_main_module();
-    auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
-    auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
-    auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
-    auto r  = mm->add_instruction(migraphx::make_op("scatternd_add"), l0, l1, l2);
-    mm->add_return({r});
-    auto prog = migraphx::parse_onnx("scatternd_add_test.onnx");
-    EXPECT(p == prog);
-}
-
-TEST_CASE(scatternd_mul_test)
-{
-    migraphx::program p;
-    auto* mm = p.get_main_module();
-    auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
-    auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
-    auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
-    auto r  = mm->add_instruction(migraphx::make_op("scatternd_mul"), l0, l1, l2);
-    mm->add_return({r});
-    auto prog = migraphx::parse_onnx("scatternd_mul_test.onnx");
-    EXPECT(p == prog);
-}
-
 TEST_CASE(selu_test)
 {
     migraphx::program p;
@@ -8436,6 +8606,86 @@ TEST_CASE(undefined_test)
     EXPECT(p == prog);
 }
+TEST_CASE(unique_dynamic_sorted_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    migraphx::shape s{migraphx::shape::float_type, {6}};
+    auto x = mm->add_parameter("X", s);
+
+    auto out   = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}, {"axis", 0}}), x);
+    auto y     = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
+    auto y_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
+    auto x_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
+    auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
+    mm->add_return({y, y_ind, x_ind, count});
+
+    auto prog = migraphx::parse_onnx("unique_dynamic_sorted_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(unique_dynamic_sorted_3D_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    migraphx::shape s{migraphx::shape::int64_type, {4, 4, 4}};
+    auto x = mm->add_parameter("X", s);
+
+    auto out   = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}}), x);
+    auto y     = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
+    auto y_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
+    auto x_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
+    auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
+    mm->add_return({y, y_ind, x_ind, count});
+
+    auto prog = migraphx::parse_onnx("unique_dynamic_sorted_3D_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(unique_sorted_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    migraphx::shape s_x{migraphx::shape::float_type, {6}};
+    std::vector<float> x_data = {2, 1, 1, 3, 4, 3};
+    auto x = mm->add_literal(migraphx::literal(s_x, x_data));
+
+    auto out   = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}, {"axis", 0}}), x);
+    auto y     = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
+    auto y_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
+    auto x_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
+    auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
+    mm->add_return({y, y_idx, x_idx, count});
+
+    auto prog = migraphx::parse_onnx("unique_sorted_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(unique_unsorted_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+
+    migraphx::shape s_x{migraphx::shape::float_type, {6}};
+    std::vector<float> x_data = {2, 1, 1, 3, 4, 3};
+    auto x = mm->add_literal(migraphx::literal(s_x, x_data));
+
+    auto out   = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 0}, {"axis", 0}}), x);
+    auto y     = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
+    auto y_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
+    auto x_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
+    auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
+    mm->add_return({y, y_idx, x_idx, count});
+
+    auto prog = migraphx::parse_onnx("unique_unsorted_test.onnx");
+    EXPECT(p == prog);
+}
+
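(unique_sorted_test and unique_unsorted_test embed X as an initializer rather than a graph input, which is why their Python generators above return an empty input list and pass x in the trailing initializer slot, and why the expectations here use add_literal instead of add_parameter.)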
 TEST_CASE(unknown_test)
 {
     migraphx::program p;

...