Commit fae5170b authored by Shucai Xiao

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into ref_op_name

parents a3906038 2a79a9ff
@@ -3,6 +3,7 @@
#include <migraphx/kernels/types.hpp>
#include <migraphx/kernels/integral_constant.hpp>
#include <migraphx/kernels/functional.hpp>
namespace migraphx {
@@ -13,7 +14,7 @@ constexpr auto vec_size(vec<T, N>)
}
 template <class T>
-constexpr auto vec_size(T, ...)
+constexpr auto vec_size(T, ...) // NOLINT
 {
     return index_constant<0>{};
 }
@@ -24,6 +25,38 @@ constexpr auto vec_size()
return decltype(vec_size(T{})){};
}
template <class... Ts>
constexpr auto is_any_vec()
{
if constexpr(sizeof...(Ts) == 0)
return false_type{};
else
return bool_constant<((vec_size<Ts>() + ...) > 0)>{};
}
template <class T, class I>
constexpr auto vec_at(T x, I i)
{
if constexpr(vec_size<T>() == 0)
return x;
else
{
MIGRAPHX_ASSERT(i < vec_size<T>());
return x[i];
}
}
template <class... Ts>
constexpr auto common_vec_size()
{
return fold([](auto x, auto y) {
if constexpr(x > y)
return x;
else
return y;
})(vec_size<Ts>()...);
}
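// For intuition (types invented for illustration): with
// Ts = {float, vec<float, 4>, vec<float, 2>} the fold computes
// max(0, 4, 2), so common_vec_size<Ts...>() yields index_constant<4>{}.
// Scalars contribute 0, so the result is 0 only when every argument is
// scalar, which is exactly when is_any_vec<Ts...>() is false.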
template <index_int N, class T>
__device__ __host__ auto as_vec(T* x)
{
@@ -33,5 +66,25 @@ __device__ __host__ auto as_vec(T* x)
return reinterpret_cast<vec<T, N>*>(x);
}
template <class... Ts>
constexpr auto vec_transform(Ts... xs)
{
return [=](auto f) {
if constexpr(is_any_vec<Ts...>())
{
using type = decltype(f(vec_at(xs, 0)...));
constexpr auto size = common_vec_size<Ts...>();
vec<type, size> result = {0};
for(int i = 0; i < size; i++)
result[i] = f(vec_at(xs, i)...);
return result;
}
else
{
return f(xs...);
}
};
}
} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_VEC_HPP
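To see how these helpers compose, here is a hypothetical usage sketch written against the header above (float4 and saxpy are invented names; this is an illustration, not code from the commit):

using float4 = vec<float, 4>;

// Apply a scalar functor across a mix of scalar and vector arguments.
// vec_at(x, i) yields x[i] for vector arguments and x itself for
// scalars, so the scalar coefficient broadcasts across all four lanes.
__device__ float4 saxpy(float a, float4 x, float4 y)
{
    return vec_transform(a, x, y)(
        [](auto a_, auto x_, auto y_) { return a_ * x_ + y_; });
}

Because is_any_vec<float, float4, float4>() is true, vec_transform builds the result lane by lane over the common vector size; with all-scalar arguments it would simply call the functor once.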
@@ -7,40 +7,70 @@
namespace migraphx {
 template <class T>
-constexpr auto tensor_vec_size(T)
+constexpr auto tensor_vec_size()
 {
     return vec_size<typename T::type>();
 }
 
+template <class T>
+constexpr auto tensor_vec_size(T)
+{
+    return tensor_vec_size<T>();
+}
+
-template <index_int N, class Shape>
-constexpr auto as_vec_shape(Shape s)
-{
-    auto lens = transform(s.lens, s.strides, [](auto len, auto stride) {
-        if(stride == 1)
-            return len / N;
-        else
-            return len;
-    });
-    auto strides = transform(s.strides, [](auto stride) {
-        if(stride == 1)
-            return stride;
-        return stride / N;
-    });
-    MIGRAPHX_ASSERT(make_shape(lens, strides).element_space() * N == s.element_space());
-    return make_shape(lens, strides);
-}
+template <index_int N, class Shape, class Axis>
+constexpr auto shape_step(Shape s, Axis)
+{
+    static_assert(N > 0, "Vector size must be non-zero");
+    return sequence(s.lens.size(), [&](auto... is) {
+        auto lens = transform(s.lens, index_ints<is...>{}, [&](auto i, auto j) {
+            constexpr auto axis = Axis::to();
+            MIGRAPHX_ASSERT(i != 0);
+            MIGRAPHX_ASSERT(j != axis or i % N == 0);
+            if(j == axis)
+                return i / N;
+            else
+                return i;
+        });
+        auto strides = transform(s.strides, index_ints<is...>{}, [&](auto i, auto j) {
+            constexpr auto axis = Axis::to();
+            // If the stride of the axis is zero then we don't need to adjust the other strides
+            if(Shape{}.strides[axis] == 0)
+                return i;
+            MIGRAPHX_ASSERT(j == axis or i % N == 0);
+            if(j == axis)
+                return i;
+            else
+                return i / N;
+        });
+        MIGRAPHX_ASSERT(make_shape(lens, strides).elements() * N == s.elements());
+        MIGRAPHX_ASSERT(strides[Axis{}] == 0 or
+                        make_shape(lens, strides).element_space() * N == s.element_space());
+        return make_shape(lens, strides);
+    });
+}
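// A worked example (numbers invented for illustration): shape_step<4>
// applied at axis 2 of a packed shape with lens = {2, 3, 8} and
// strides = {24, 8, 1} divides lens[axis] and every non-axis stride by
// N, giving lens = {2, 3, 2} and strides = {6, 2, 1}; elements() drops
// from 48 to 12, and 12 * N == 48 is what the first assert checks.
// When strides[axis] == 0 (a broadcast axis) the strides are left
// untouched and only lens[axis] is divided.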
-template <index_int N, class T>
-__device__ __host__ auto as_vec(T x)
+// Bools cannot be used as a vector type, so convert them to int8
+template <class T>
+__device__ __host__ T* remove_bool(T* x)
+{
+    return x;
+}
+
+inline __device__ __host__ int8_t* remove_bool(bool* x) { return reinterpret_cast<int8_t*>(x); }
+
+template <index_int N, class T, class Axis>
+__device__ __host__ auto as_vec(T x, Axis axis)
 {
     if constexpr(N == 0)
         return x;
     else
-        return make_tensor_view(as_vec<N>(x.data()), as_vec_shape<N>(x.get_shape()));
+        return make_tensor_view(as_vec<N>(remove_bool(x.data())),
+                                shape_step<N>(x.get_shape(), axis));
 }
 template <index_int N, class T, class Axis>
-constexpr auto tensor_step(T x, Axis)
+constexpr auto tensor_step(T x, Axis axis)
 {
     if constexpr(N == 0)
     {

@@ -49,17 +79,8 @@ constexpr auto tensor_step(T x, Axis)
     else
     {
         constexpr auto s = decltype(x.get_shape()){};
-        MIGRAPHX_ASSERT(s.strides[Axis{}] == 0);
-        return sequence(x.get_shape().lens.size(), [&](auto... is) {
-            auto lens = transform(s.lens, index_ints<is...>{}, [&](auto i, auto j) {
-                constexpr auto axis = Axis{};
-                if(j == axis)
-                    return i / N;
-                else
-                    return i;
-            });
-            return make_tensor_view(x.data(), make_shape(lens, s.strides));
-        });
+        MIGRAPHX_ASSERT(s.strides[axis] == 0);
+        return make_tensor_view(x.data(), shape_step<N>(s, axis));
     }
 }
@@ -69,42 +90,71 @@ __device__ __host__ auto as_vec(IntegralConstant ic, T&& x)
     return as_vec<ic>(x);
 }
 
-template <class... Shapes>
-constexpr index_int find_vector_axis(Shapes... ss)
+template <class Shape>
+constexpr index_int find_vector_axis_c(Shape s)
+{
+    // Find the fastest axis that is not broadcasted
+    index_int axis = 0;
+    for(index_int i = 1; i < s.lens.size(); i++)
+    {
+        if(s.strides[i] == 0)
+            continue;
+        if(s.strides[axis] == 0 or
+           pack_compare(less{}, pack(s.strides[i], s.lens[i]), pack(s.strides[axis], s.lens[axis])))
+            axis = i;
+    }
+    return axis;
+}
+
+template <class... Shapes>
+constexpr index_int find_vector_axis_c(Shapes... ss)
 {
+    const bool all_broadcasted = (ss.broadcasted() and ...);
     index_int axis = 0;
     bool b = false;
     by([&](auto s) {
-        if(s.broadcasted() or b)
+        if(b)
             return;
-        auto it = find(s.strides.begin(), s.strides.end(), 1);
-        if(it == s.strides.end())
+        // Skip broadcasted shapes if any shape is not broadcasted
+        if(not all_broadcasted and s.broadcasted())
            return;
-        axis = it - s.strides.begin();
-        b = true;
+        axis = find_vector_axis_c(s);
+        if(s.strides[axis] == 1)
+            b = true;
     })(ss...);
+    if(not b)
+        return -1;
     return axis;
 }
 
+template <class... Shapes>
+constexpr auto find_vector_axis(Shapes...)
+{
+    return _c<find_vector_axis_c(Shapes{}...)>;
+}
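// For intuition (values invented): with lens = {2, 3, 4} and
// strides = {12, 4, 1} the single-shape overload picks axis 2, the
// fastest non-broadcast axis. For a fully broadcast shape (all strides
// zero) it falls back to axis 0, and since that stride is not 1 the
// variadic overload reports failure by returning -1.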
 template <index_int N, class Axis, class... Shapes>
-constexpr auto is_vectorizable(Axis axis, Shapes... ss)
+constexpr auto is_vectorizable_c(Axis axis, Shapes... ss)
 {
-    return (((ss.lens[axis] % N) == 0 and (ss.strides[axis] == 1 or ss.strides[axis] == 0)) and
+    return ((axis < ss.lens.size() and ss.lens[axis] % N == 0 and
+             // Only vectorize broadcasted types with stride 0, since this causes issues in the
+             // preloader
+             ((not ss.broadcasted() and ss.strides[axis] == 1) or ss.strides[axis] == 0)) and
            ...);
 }
 
-template <index_int N, class... Shapes>
-constexpr bool is_vectorizable(Shapes... ss)
+template <index_int N, class Axis, class... Shapes>
+constexpr auto is_vectorizable(Axis, Shapes...)
 {
-    return (is_vectorizable<N>(ss, find_vector_axis(ss)) and ...);
+    return _c<is_vectorizable_c<N>(Axis::to(), Shapes{}...)>;
 }
 template <class P>
 constexpr auto find_vectorize_size(P pred)
 {
-    if constexpr(pred(_c<4>))
+    if constexpr(decltype(pred(_c<4>)){})
         return _c<4>;
-    else if constexpr(pred(_c<2>))
+    else if constexpr(decltype(pred(_c<2>)){})
         return _c<2>;
     else
         return _c<0>;
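The decltype(pred(_c<4>)){} form deserves a note: a function parameter such as pred cannot appear in a constant expression inside if constexpr, but its return type is an integral constant that carries the value, so value-initializing that type recovers the result at compile time. A minimal standalone sketch of the idiom, with std::integral_constant standing in for the kernel's _c (illustrative, not library code):

#include <type_traits>

template <class F>
constexpr int pick(F f)
{
    // f itself is not a constant expression here, but the value lives
    // in its return *type*, which decltype can name and instantiate.
    if constexpr(decltype(f(std::integral_constant<int, 4>{})){})
        return 4;
    return 0;
}

static_assert(pick([](auto i) { return std::bool_constant<(i > 2)>{}; }) == 4);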
@@ -113,11 +163,12 @@ constexpr auto find_vectorize_size(P pred)
 template <class T>
 __host__ __device__ auto vectorize(T x)
 {
-    if constexpr(vec_size<T>() == 0)
+    if constexpr(tensor_vec_size<T>() == 0)
     {
+        constexpr auto axis = find_vector_axis(x.get_shape());
         constexpr auto n =
-            find_vectorize_size([&](auto i) { return _c<is_vectorizable<i>(x.get_shape())>; });
-        return as_vec<n>(x);
+            find_vectorize_size([&](auto i) { return is_vectorizable<i>(axis, x.get_shape()); });
+        return as_vec<n>(x, axis);
     }
     else
     {
@@ -125,34 +176,46 @@ __host__ __device__ auto vectorize(T x)
     }
 }
 
+template <class F, class... Ts>
+inline __device__ __host__ auto auto_vectorize_impl(F f, Ts... xs)
+{
+    // TODO: Just check there is a single axis of 1
+    constexpr bool packed_or_broadcasted =
+        ((xs.get_shape().packed() or xs.get_shape().broadcasted()) and ...);
+    if constexpr(packed_or_broadcasted)
+    {
+        constexpr auto axis = decltype(find_vector_axis(xs.get_shape()...)){};
+        constexpr auto n = find_vectorize_size(
+            [&](auto i) { return is_vectorizable<i>(axis, xs.get_shape()...); });
+        by(
+            [&](auto x) {
+                constexpr auto s = decltype(x.get_shape()){};
+                if constexpr(axis < s.strides.size())
+                {
+                    MIGRAPHX_ASSERT(s.strides[axis] == 0 or s.strides[axis] == 1);
+                    MIGRAPHX_ASSERT(s.lens[axis] > 0);
+                    MIGRAPHX_ASSERT(n == 0 or s.lens[axis] % n == 0);
+                    if constexpr(s.strides[axis] == 0)
+                        return tensor_step<n>(x, axis);
+                    else
+                        return as_vec<n>(x, axis);
+                }
+                else
+                {
+                    return x;
+                }
+            },
+            f)(xs...);
+    }
+    else
+    {
+        f(xs...);
+    }
+}
+
 inline __device__ __host__ auto auto_vectorize()
 {
-    return [](auto... xs) {
-        return [=](auto f) {
-            // TODO: Just check there is a single axis of 1
-            constexpr bool packed_or_broadcasted =
-                ((xs.get_shape().packed() or xs.get_shape().broadcasted()) and ...);
-            if constexpr(packed_or_broadcasted)
-            {
-                constexpr auto axis = find_vector_axis(xs.get_shape()...);
-                constexpr auto n = find_vectorize_size(
-                    [&](auto i) { return _c<is_vectorizable<i>(axis, xs.get_shape()...)>; });
-                by(
-                    [&](auto x) {
-                        constexpr auto s = x.get_shape();
-                        if constexpr(s.strides[axis] == 0)
-                            return tensor_step<n>(x, axis);
-                        else
-                            return as_vec<n>(x);
-                    },
-                    f)(xs...);
-            }
-            else
-            {
-                f(xs...);
-            }
-        };
-    };
+    return [](auto... xs) { return [=](auto f) { auto_vectorize_impl(f, xs...); }; };
 }
} // namespace migraphx
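For orientation, a hypothetical call site (a sketch of the intended usage, assuming out and in are tensor_views with a shared packed layout so both vectorize with the same size; elementwise is an invented name, not the library's actual pointwise kernel):

template <class F, class Out, class In>
__device__ void elementwise(F f, Out out, In in)
{
    auto_vectorize()(out, in)([&](auto vout, auto vin) {
        // When the shapes allow it, vout and vin now have vec<T, N>
        // element types and N-times fewer elements; otherwise they are
        // passed through unchanged and this loop runs as scalar code.
        for(index_int i = 0; i < vout.get_shape().elements(); i++)
            vout[i] = vec_transform(vin[i])(f);
    });
}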
@@ -101,4 +101,38 @@ TEST_CASE(after_param_broadcast)
EXPECT(not m.get_output_shapes().back().broadcasted());
}
TEST_CASE(two_transpose_gather)
{
migraphx::module m1;
{
auto data = m1.add_parameter("2x2", {migraphx::shape::float_type, {2, 3, 4, 5}});
auto ind = m1.add_parameter("ind", {migraphx::shape::float_type, {2, 3}});
auto td = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), data);
auto sd = m1.add_instruction(migraphx::make_op("softmax", {{"axis", 2}}), td);
auto bd =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 3, 1, 2}}}), sd);
auto r = m1.add_instruction(migraphx::make_op("gather", {{"axis", 2}}), bd, ind);
m1.add_return({r});
}
run_pass(m1);
migraphx::module m2;
{
auto data = m2.add_parameter("2x2", {migraphx::shape::float_type, {2, 3, 4, 5}});
auto ind = m2.add_parameter("ind", {migraphx::shape::float_type, {2, 3}});
auto td = m2.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), data);
auto ctd = m2.add_instruction(migraphx::make_op("contiguous"), td);
auto sd = m2.add_instruction(migraphx::make_op("softmax", {{"axis", 2}}), ctd);
auto bd =
m2.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 3, 1, 2}}}), sd);
auto cbd = m2.add_instruction(migraphx::make_op("contiguous"), bd);
auto r = m2.add_instruction(migraphx::make_op("gather", {{"axis", 2}}), cbd, ind);
m2.add_return({r});
}
EXPECT(m1 == m2);
}
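// Note: the expected module m2 differs from m1 only by the two
// "contiguous" instructions inserted after the transposes, so the pass
// is expected to normalize the layouts feeding softmax and gather
// without otherwise changing the graph.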
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -73,6 +73,35 @@ TEST_CASE(double_add)
EXPECT(p1.sort() == p2.sort());
}
TEST_CASE(double_add_without_return)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s);
auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
mm->add_instruction(migraphx::make_op("add"), add1, z);
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s);
auto fadd =
add_pointwise(p2, "main:pointwise0", {x, y, z}, [=](auto* pm, const auto& inputs) {
auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
return pm->add_instruction(migraphx::make_op("add"), add1, inputs[2]);
});
mm->add_instruction(migraphx::make_op("identity"), fadd);
}
EXPECT(p1.sort() == p2.sort());
}
TEST_CASE(used_twice_not_fused)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
@@ -1618,6 +1618,22 @@ def greater_bool_test():
return ([node1, node2], [x1, x2], [y])
@onnx_test
def greaterorequal_test():
x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'GreaterOrEqual',
inputs=['x1', 'x2'],
outputs=['y'],
)
return ([node], [x1, x2], [y])
@onnx_test
def group_conv_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
@@ -1634,6 +1650,60 @@ def group_conv_test():
return ([node], [x, y], [z])
@onnx_test
def hardsigmoid_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 4, 5])
node = onnx.helper.make_node('HardSigmoid', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def hardsigmoid_double_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [1, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [1, 3, 4, 5])
node = onnx.helper.make_node('HardSigmoid',
inputs=['x'],
outputs=['y'],
alpha=0.3,
beta=0.7)
return ([node], [x], [y])
@onnx_test
def hardsigmoid_half_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [1, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 3, 4, 5])
node = onnx.helper.make_node('HardSigmoid', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def hardsigmoid_verify_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
node = onnx.helper.make_node('HardSigmoid', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def hardswish_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
node = onnx.helper.make_node('HardSwish', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def if_else_test():
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
@@ -2692,6 +2762,80 @@ def maxpool_same_upper_test():
return ([node], [x], [y])
@onnx_test
def mean_broadcast_test():
data_0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4])
data_1 = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 2, 3, 4])
data_2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [4])
data_3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1])
data_4 = helper.make_tensor_value_info('4', TensorProto.FLOAT, [2, 3, 1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT,
[1, 2, 3, 4])
node = onnx.helper.make_node("Mean",
inputs=["0", "1", "2", "3", "4"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2, data_3, data_4], [mean])
@onnx_test
def mean_fp16_test():
data_0 = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1, 2, 3])
data_1 = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 2, 3])
data_2 = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [1, 2, 3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16,
[1, 2, 3])
node = onnx.helper.make_node("Mean",
inputs=["0", "1", "2"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2], [mean])
@onnx_test
def mean_invalid_broadcast_test():
data_0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3])
data_1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2, 3])
data_2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 2, 4])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3])
node = onnx.helper.make_node("Mean",
inputs=["0", "1", "2"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2], [mean])
@onnx_test
def mean_single_input_test():
data_0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3])
node = onnx.helper.make_node("Mean", inputs=["0"], outputs=["mean"])
return ([node], [data_0], [mean])
@onnx_test
def mean_test():
data = [
helper.make_tensor_value_info(str(i), TensorProto.DOUBLE, [2, 2, 2])
for i in range(10)
]
data_names = [str(i) for i in range(10)]
mean = helper.make_tensor_value_info('mean', TensorProto.DOUBLE, [2, 2, 2])
node = onnx.helper.make_node("Mean", inputs=data_names, outputs=["mean"])
return ([node], data, [mean])
@onnx_test
def min_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
@@ -2725,6 +2869,21 @@ def multinomial_test():
return ([node], [input], [output])
@onnx_test
def multinomial_generated_seed_test():
sample_size = 10
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node('Multinomial',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
@onnx_test
def multinomial_dtype_error_test():
sample_size = 10
@@ -3176,6 +3335,21 @@ def randomnormal_dtype_error_test():
return ([node], [], [output])
@onnx_test
def randomnormal_generated_seed_test():
sample_size = 10
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node('RandomNormal',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
@onnx_test
def randomnormal_shape_error_test():
dtype = 1
@@ -3266,6 +3440,21 @@ def randomuniform_dtype_error_test():
return ([node], [], [output])
@onnx_test
def randomuniform_generated_seed_test():
sample_size = 10
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node('RandomUniform',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
@onnx_test
def randomuniform_shape_error_test():
dtype = 1
@@ -4290,6 +4479,44 @@ def softmax_nonstd_input_test():
return ([node0, node1], [x], [y])
@onnx_test
def softsign_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
node = onnx.helper.make_node('Softsign', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def softplus_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
node = onnx.helper.make_node('Softplus', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def softsign_nd_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 4, 5])
node = onnx.helper.make_node('Softsign', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def softplus_nd_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 4, 5])
node = onnx.helper.make_node('Softplus', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def split_minus_axis_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
@@ -4847,6 +5074,25 @@ def unknown_aten_test():
return ([node], [x, y], [a])
@onnx_test
def upsample_linear_test():
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
scales_tensor = helper.make_tensor(name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(
np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
node = onnx.helper.make_node('Upsample',
inputs=['X', '', 'scales'],
outputs=['Y'],
mode='linear')
return ([node], [X], [Y], [scales_tensor])
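# Note: this model is expected to lower the same way as
# resize_upsample_linear_ac_test; the C++ parser tests below share the
# expected program via create_upsample_linear_prog().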
@onnx_test
def upsample_test():
scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
[Binary files — no text preview for this file type. New ONNX protobuf test models added: greaterorequal_test.onnx, hardsigmoid_default_test.onnx, hardsigmoid_double_test.onnx, hardsigmoid_half_test.onnx, hardsigmoid_verify_test.onnx, hardswish_test.onnx, mean_broadcast_test.onnx, mean_fp16_test.onnx, mean_invalid_broadcast_test.onnx, mean_single_input_test.onnx, mean_test.onnx, multinomial_generated_seed_test.onnx.]
@@ -1549,6 +1549,24 @@ TEST_CASE(greater_bool_test)
EXPECT(p == prog);
}
TEST_CASE(greaterorequal_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input1 = mm->add_parameter("x1", migraphx::shape{migraphx::shape::float_type, {3}});
auto input2 = mm->add_parameter("x2", migraphx::shape{migraphx::shape::float_type, {3}});
auto temp = mm->add_instruction(migraphx::make_op("less"), input1, input2);
auto bt = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), temp);
auto ge = mm->add_instruction(migraphx::make_op("not"), bt);
mm->add_return({ge});
auto prog = migraphx::parse_onnx("greaterorequal_test.onnx");
EXPECT(p == prog);
}
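The expected graph relies on the identity x >= y == not(x < y), which holds for totally ordered (non-NaN) inputs; stated as a scalar sketch (illustrative only):

constexpr bool greater_or_equal(float a, float b) { return not(a < b); }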
TEST_CASE(group_conv_test)
{
migraphx::program p;
@@ -1563,6 +1581,140 @@ TEST_CASE(group_conv_test)
EXPECT(p == prog);
}
TEST_CASE(hardsigmoid_default_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{1, 3, 4, 5};
auto input_type = migraphx::shape::float_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
float alpha = 0.2;
float beta = 0.5;
auto mb_alpha = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
auto mb_beta = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
auto mb_zero =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
auto mb_one =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);
auto prog = optimize_onnx("hardsigmoid_default_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(hardsigmoid_double_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{1, 3, 4, 5};
auto input_type = migraphx::shape::double_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
float alpha = 0.3;
float beta = 0.7;
auto mb_alpha = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
auto mb_beta = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
auto mb_zero =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
auto mb_one =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);
auto prog = optimize_onnx("hardsigmoid_double_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(hardsigmoid_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{1, 3, 4, 5};
auto input_type = migraphx::shape::half_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
float alpha = 0.2;
float beta = 0.5;
auto mb_alpha = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
auto mb_beta = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
auto mb_zero =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
auto mb_one =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);
auto prog = optimize_onnx("hardsigmoid_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(hardswish_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{2, 5};
auto input_type = migraphx::shape::float_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
float alpha = 1.0 / 6.0;
float beta = 0.5;
auto mb_alpha = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
auto mb_beta = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
auto mb_zero =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
auto mb_one =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
auto hardsigmoid = mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);
mm->add_instruction(migraphx::make_op("mul"), x, hardsigmoid);
auto prog = optimize_onnx("hardswish_test.onnx");
EXPECT(p == prog);
}
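For reference, the scalar semantics the expected graphs above encode, per the ONNX operator definitions (hardswish_ref is an illustrative sketch assuming <algorithm> is included, not library code):

// HardSigmoid(x) = clip(alpha * x + beta, 0, 1) with defaults
// alpha = 0.2 and beta = 0.5; HardSwish fixes alpha = 1/6, beta = 0.5
// and multiplies the result by x.
float hardswish_ref(float x)
{
    float hs = std::min(1.0f, std::max(0.0f, x / 6.0f + 0.5f));
    return x * hs;
}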
TEST_CASE(if_else_test)
{
migraphx::program p;
@@ -2340,6 +2492,50 @@ TEST_CASE(maxpool_same_upper_test)
EXPECT(p == prog);
}
TEST_CASE(mean_invalid_broadcast_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mean_invalid_broadcast_test.onnx"); }));
}
TEST_CASE(mean_single_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto data0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 3}});
mm->add_return({data0});
auto prog = migraphx::parse_onnx("mean_single_input_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(mean_fp16_test)
{
const std::size_t num_data = 3;
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::half_type, {1, 2, 3}};
auto data0 = mm->add_parameter("0", s);
auto data1 = mm->add_parameter("1", s);
auto data2 = mm->add_parameter("2", s);
auto div_lit = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {num_data}});
auto divisor =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), div_lit);
auto mean = mm->add_instruction(migraphx::make_op("div"), data0, divisor);
divisor =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), div_lit);
data1 = mm->add_instruction(migraphx::make_op("div"), data1, divisor);
mean = mm->add_instruction(migraphx::make_op("add"), mean, data1);
divisor =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), div_lit);
data2 = mm->add_instruction(migraphx::make_op("div"), data2, divisor);
mean = mm->add_instruction(migraphx::make_op("add"), mean, data2);
auto prog = optimize_onnx("mean_fp16_test.onnx");
EXPECT(p == prog);
}
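// Note: the expected graph computes x0/n + x1/n + x2/n rather than
// (x0 + x1 + x2)/n; dividing each input first keeps the running sum
// small, which matters for half-precision inputs like these.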
TEST_CASE(min_test)
{
migraphx::program p;
@@ -2388,6 +2584,14 @@ TEST_CASE(multinomial_dtype_error_test)
EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); }));
}
TEST_CASE(multinomial_generated_seed_test)
{
auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx");
auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx");
EXPECT(p1 != p2);
}
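// With no explicit seed attribute in the model, a seed is generated at
// parse time, so parsing the same file twice should yield different
// programs; the randomnormal and randomuniform variants below check
// the same behavior.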
TEST_CASE(multinomial_int64_test)
{
migraphx::program p;
@@ -2891,6 +3095,14 @@ TEST_CASE(randomnormal_dtype_error_test)
EXPECT(test::throws([&] { migraphx::parse_onnx("randomnormal_dtype_error_test.onnx"); }));
}
TEST_CASE(randomnormal_generated_seed_test)
{
auto p1 = optimize_onnx("randomnormal_generated_seed_test.onnx");
auto p2 = optimize_onnx("randomnormal_generated_seed_test.onnx");
EXPECT(p1 != p2);
}
TEST_CASE(randomnormal_shape_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("randomnormal_shape_error_test.onnx"); }));
@@ -2953,6 +3165,14 @@ TEST_CASE(randomuniform_dtype_error_test)
EXPECT(test::throws([&] { migraphx::parse_onnx("randomuniform_dtype_error_test.onnx"); }));
}
TEST_CASE(randomuniform_generated_seed_test)
{
auto p1 = optimize_onnx("randomuniform_generated_seed_test.onnx");
auto p2 = optimize_onnx("randomuniform_generated_seed_test.onnx");
EXPECT(p1 != p2);
}
TEST_CASE(randomuniform_shape_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("randomuniform_shape_error_test.onnx"); }));
@@ -3423,7 +3643,7 @@ TEST_CASE(resize_nonstd_input_test)
EXPECT(p == prog);
}
-TEST_CASE(resize_upsample_linear_ac_test)
+static auto create_upsample_linear_prog()
{
migraphx::program p;
auto* mm = p.get_main_module();
@@ -3514,6 +3734,12 @@ TEST_CASE(resize_upsample_linear_ac_test)
auto add1 = mm->add_instruction(migraphx::make_op("add"), mul1, slc10);
mm->add_return({add1});
return p;
}
TEST_CASE(resize_upsample_linear_ac_test)
{
auto p = create_upsample_linear_prog();
auto prog = migraphx::parse_onnx("resize_upsample_linear_ac_test.onnx");
EXPECT(p == prog);
}
@@ -3972,6 +4198,86 @@ TEST_CASE(softmax_nonstd_input_test)
EXPECT(p == prog);
}
TEST_CASE(softplus_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{5};
auto input_type = migraphx::shape::float_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
auto mb_ones =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto exp = mm->add_instruction(migraphx::make_op("exp"), x);
auto add = mm->add_instruction(migraphx::make_op("add"), exp, mb_ones);
mm->add_instruction(migraphx::make_op("log"), add);
auto prog = optimize_onnx("softplus_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(softplus_nd_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{3, 4, 5};
auto input_type = migraphx::shape::half_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
auto mb_ones =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto exp = mm->add_instruction(migraphx::make_op("exp"), x);
auto add = mm->add_instruction(migraphx::make_op("add"), exp, mb_ones);
mm->add_instruction(migraphx::make_op("log"), add);
auto prog = optimize_onnx("softplus_nd_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(softsign_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{5};
auto input_type = migraphx::shape::float_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
auto mb_ones =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto abs = mm->add_instruction(migraphx::make_op("abs"), x);
auto add = mm->add_instruction(migraphx::make_op("add"), abs, mb_ones);
mm->add_instruction(migraphx::make_op("div"), x, add);
auto prog = optimize_onnx("softsign_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(softsign_nd_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{3, 4, 5};
auto input_type = migraphx::shape::half_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
auto mb_ones =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
auto abs = mm->add_instruction(migraphx::make_op("abs"), x);
auto add = mm->add_instruction(migraphx::make_op("add"), abs, mb_ones);
mm->add_instruction(migraphx::make_op("div"), x, add);
auto prog = optimize_onnx("softsign_nd_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(split_minus_axis_test)
{
migraphx::program p;
@@ -4453,6 +4759,13 @@ TEST_CASE(unknown_test_throw)
EXPECT(test::throws([&] { migraphx::parse_onnx("unknown_test.onnx"); }));
}
TEST_CASE(upsample_linear_test)
{
auto p = create_upsample_linear_prog();
auto prog = migraphx::parse_onnx("upsample_linear_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(upsample_test)
{
migraphx::program p;
[Binary file — no text preview: randomnormal_generated_seed_test.onnx added.]